code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IntQueue(Queue):
    """An integer-only FIFO queue backed by a fixed-size circular int array.

    NOTE(review): this copy had a corrupted placeholder where the class
    docstring belonged; restored here.
    """

    def __init__(self, maxSize):
        """maxSize is the maximum number of items that can be in the queue
        at any given time."""
        self.front = 0   # index of the next element to dequeue
        self.end = 0     # index of the next free slot
        self.qSize = 0   # current element count
        # 'i' = C int array: compact storage for large capacities
        self.data = arr('i', (0 for i in range(maxSize)))

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return self.qSize == 0

    def size(self):
        """Return the number of elements inside the queue."""
        return self.qSize

    def peek(self):
        """Return the head element without removing it; raise when empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front = self.front % len(self.data)  # normalize before indexing
        return self.data[self.front]

    def isFull(self):
        """Return True when the backing array is at capacity."""
        return self.qSize == len(self.data)

    def offer(self, value):
        """Add an element at the tail; raise when the queue is full."""
        if self.isFull():
            raise Exception('Queue too small!')
        self.data[self.end] = value
        self.end += 1
        self.qSize += 1
        self.end = self.end % len(self.data)  # wrap around the circular buffer

    def poll(self):
        """Remove and return the head element; raise when empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.qSize -= 1
        self.front = self.front % len(self.data)  # normalize before indexing
        d = self.data[self.front]
        self.front += 1
        return d
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IntQueue(Queue):
    """Fixed-capacity queue of C ints stored in a circular array buffer."""

    def __init__(self, maxSize):
        """maxSize caps how many items the queue may hold at once."""
        self.front = 0
        self.end = 0
        self.qSize = 0
        self.data = arr('i', [0] * maxSize)

    def isEmpty(self):
        """True when no elements are stored."""
        return not self.qSize

    def size(self):
        """Number of elements currently stored."""
        return self.qSize

    def peek(self):
        """Return (without removing) the element at the head; raise if empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front %= len(self.data)
        return self.data[self.front]

    def isFull(self):
        """True when the backing array has no free slot left."""
        return len(self.data) == self.qSize

    def offer(self, value):
        """Enqueue value at the tail; raise when at capacity."""
        if self.isFull():
            raise Exception('Queue too small!')
        self.data[self.end] = value
        self.qSize += 1
        self.end = (self.end + 1) % len(self.data)

    def poll(self):
        """Dequeue and return the head element; raise if empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front %= len(self.data)
        value = self.data[self.front]
        self.front += 1
        self.qSize -= 1
        return value
def benchMarkTest():
    """Compare raw throughput of IntQueue against collections.deque (10M ops each)."""
    n = 10000000

    queue = IntQueue(n)
    t0 = time.process_time()
    for i in range(n):
        queue.offer(i)
    for i in range(n):
        queue.poll()
    t1 = time.process_time()
    print('IntQueue Time: ', t1 - t0)

    dq = deque()
    t0 = time.process_time()
    for i in range(n):
        dq.append(i)
    for i in range(n):
        dq.popleft()
    t1 = time.process_time()
    print('ArrayDeque Time: ', t1 - t0)
if __name__ == '__main__':
    """
    Example usage
    """
    q = IntQueue(5)
    # fill to capacity
    q.offer(1)
    q.offer(2)
    q.offer(3)
    q.offer(4)
    q.offer(5)
    # drain four of the five elements
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.isEmpty())  # False: one element (5) still queued
    # refill to exercise the circular wrap-around
    q.offer(1)
    q.offer(2)
    q.offer(3)
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.isEmpty())  # True: fully drained again
    benchMarkTest()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
from array import array as arr
from collections import deque
from Queue import Queue
class IntQueue(Queue):
    """
    An integer only implementation of a queue
    """
    def __init__(self, maxSize):
        """
        maxSize is the maximum number of items
        that can be in the queue at any given time
        """
        self.front = 0   # index of the next element to dequeue
        self.end = 0     # index of the next free slot
        self.qSize = 0   # current element count
        # 'i' = C int array: compact storage for large capacities
        self.data = arr('i', (0 for i in range(maxSize)))
    def isEmpty(self):
        """
        Return true/false on whether the queue is empty
        """
        return self.qSize == 0
    def size(self):
        """
        Return the number of elements inside the queue
        """
        return self.qSize
    def peek(self):
        # Return the head element without removing it; raises when empty.
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front = self.front % len(self.data)  # normalize before indexing
        return self.data[self.front]
    def isFull(self):
        # True when the backing array is at capacity.
        return self.qSize == len(self.data)
    def offer(self, value):
        """
        Add an element to the queue
        """
        if self.isFull():
            raise Exception('Queue too small!')
        self.data[self.end] = value
        self.end += 1
        self.qSize += 1
        self.end = self.end % len(self.data)  # wrap around the circular buffer
    def poll(self):
        """
        Make sure you check is the queue is not empty before calling poll!
        """
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.qSize -= 1
        self.front = self.front % len(self.data)  # normalize before indexing
        d = self.data[self.front]
        self.front += 1
        return d
def benchMarkTest():
    """Compare raw throughput of IntQueue against collections.deque (10M ops each)."""
    n = 10000000

    queue = IntQueue(n)
    t0 = time.process_time()
    for i in range(n):
        queue.offer(i)
    for i in range(n):
        queue.poll()
    t1 = time.process_time()
    print('IntQueue Time: ', t1 - t0)

    dq = deque()
    t0 = time.process_time()
    for i in range(n):
        dq.append(i)
    for i in range(n):
        dq.popleft()
    t1 = time.process_time()
    print('ArrayDeque Time: ', t1 - t0)
if __name__ == '__main__':
    """
    Example usage
    """
    q = IntQueue(5)
    # fill to capacity
    q.offer(1)
    q.offer(2)
    q.offer(3)
    q.offer(4)
    q.offer(5)
    # drain four of the five elements
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.isEmpty())  # False: one element (5) still queued
    # refill to exercise the circular wrap-around
    q.offer(1)
    q.offer(2)
    q.offer(3)
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.poll())
    print(q.isEmpty())  # True: fully drained again
    benchMarkTest()
<|reserved_special_token_1|>
'''
* @file IntQueue.py
* @author (original JAVA) William Fiset, william.alexandre.fiset@gmail.com
* liujingkun, liujkon@gmail.com
* (conversion to Python) Armin Zare Zadeh, ali.a.zarezadeh@gmail.com
* @date 23 Jun 2020
* @version 0.1
* @brief This file contains an implementation of an integer only queue.
*
'''
import time
from array import array as arr
from collections import deque
from Queue import Queue
class IntQueue(Queue):
    """Fixed-capacity queue of C ints stored in a circular array buffer."""

    def __init__(self, maxSize):
        """maxSize caps how many items the queue may hold at once."""
        self.front = 0
        self.end = 0
        self.qSize = 0
        self.data = arr('i', [0] * maxSize)

    def isEmpty(self):
        """True when no elements are stored."""
        return not self.qSize

    def size(self):
        """Number of elements currently stored."""
        return self.qSize

    def peek(self):
        """Return (without removing) the element at the head; raise if empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front %= len(self.data)
        return self.data[self.front]

    def isFull(self):
        """True when the backing array has no free slot left."""
        return len(self.data) == self.qSize

    def offer(self, value):
        """Enqueue value at the tail; raise when at capacity."""
        if self.isFull():
            raise Exception('Queue too small!')
        self.data[self.end] = value
        self.qSize += 1
        self.end = (self.end + 1) % len(self.data)

    def poll(self):
        """Dequeue and return the head element; raise if empty."""
        if self.isEmpty():
            raise Exception('Queue is empty')
        self.front %= len(self.data)
        value = self.data[self.front]
        self.front += 1
        self.qSize -= 1
        return value
def benchMarkTest():
    """Compare raw throughput of IntQueue against collections.deque (10M ops each)."""
    n = 10000000

    queue = IntQueue(n)
    t0 = time.process_time()
    for i in range(n):
        queue.offer(i)
    for i in range(n):
        queue.poll()
    t1 = time.process_time()
    print("IntQueue Time: ", t1 - t0)

    dq = deque()
    t0 = time.process_time()
    for i in range(n):
        dq.append(i)
    for i in range(n):
        dq.popleft()
    t1 = time.process_time()
    print("ArrayDeque Time: ", t1 - t0)
if __name__ == '__main__':
    # Example usage: exercise the circular wrap-around, then benchmark.
    q = IntQueue(5)

    for v in (1, 2, 3, 4, 5):
        q.offer(v)

    for _ in range(4):
        print(q.poll())        # 1, 2, 3, 4

    print(q.isEmpty())         # False: element 5 still queued

    for v in (1, 2, 3):
        q.offer(v)

    for _ in range(4):
        print(q.poll())        # 5, 1, 2, 3

    print(q.isEmpty())         # True: fully drained

    benchMarkTest()
|
flexible
|
{
"blob_id": "0ed99037d7ff708b7931fbc3553b1aeb19a20f53",
"index": 810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass IntQueue(Queue):\n <mask token>\n\n def __init__(self, maxSize):\n \"\"\"\n maxSize is the maximum number of items\n that can be in the queue at any given time\n \"\"\"\n self.front = 0\n self.end = 0\n self.qSize = 0\n self.data = arr('i', (0 for i in range(maxSize)))\n\n def isEmpty(self):\n \"\"\"\n Return true/false on whether the queue is empty\n \"\"\"\n return self.qSize == 0\n\n def size(self):\n \"\"\"\n Return the number of elements inside the queue\n \"\"\"\n return self.qSize\n\n def peek(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.front = self.front % len(self.data)\n return self.data[self.front]\n\n def isFull(self):\n return self.qSize == len(self.data)\n\n def offer(self, value):\n \"\"\"\n Add an element to the queue\n \"\"\"\n if self.isFull():\n raise Exception('Queue too small!')\n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)\n\n def poll(self):\n \"\"\"\n Make sure you check is the queue is not empty before calling poll!\n \"\"\"\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass IntQueue(Queue):\n \"\"\" \n An integer only implementation of a queue\n \"\"\"\n\n def __init__(self, maxSize):\n \"\"\"\n maxSize is the maximum number of items\n that can be in the queue at any given time\n \"\"\"\n self.front = 0\n self.end = 0\n self.qSize = 0\n self.data = arr('i', (0 for i in range(maxSize)))\n\n def isEmpty(self):\n \"\"\"\n Return true/false on whether the queue is empty\n \"\"\"\n return self.qSize == 0\n\n def size(self):\n \"\"\"\n Return the number of elements inside the queue\n \"\"\"\n return self.qSize\n\n def peek(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.front = self.front % len(self.data)\n return self.data[self.front]\n\n def isFull(self):\n return self.qSize == len(self.data)\n\n def offer(self, value):\n \"\"\"\n Add an element to the queue\n \"\"\"\n if self.isFull():\n raise Exception('Queue too small!')\n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)\n\n def poll(self):\n \"\"\"\n Make sure you check is the queue is not empty before calling poll!\n \"\"\"\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d\n\n\ndef benchMarkTest():\n \"\"\"\n BenchMark IntQueue vs ArrayDeque.\n \"\"\"\n n = 10000000\n intQ = IntQueue(n)\n start = time.process_time()\n for i in range(0, n):\n intQ.offer(i)\n for i in range(0, n):\n intQ.poll()\n end = time.process_time()\n print('IntQueue Time: ', end - start)\n arrayDeque = deque()\n start = time.process_time()\n for i in range(0, n):\n arrayDeque.append(i)\n for i in range(0, n):\n arrayDeque.popleft()\n end = time.process_time()\n print('ArrayDeque Time: ', end - start)\n\n\nif __name__ == '__main__':\n \"\"\"\n Example usage\n \"\"\"\n q = IntQueue(5)\n q.offer(1)\n q.offer(2)\n q.offer(3)\n q.offer(4)\n q.offer(5)\n print(q.poll())\n 
print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.isEmpty())\n q.offer(1)\n q.offer(2)\n q.offer(3)\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.isEmpty())\n benchMarkTest()\n",
"step-4": "<mask token>\nimport time\nfrom array import array as arr\nfrom collections import deque\nfrom Queue import Queue\n\n\nclass IntQueue(Queue):\n \"\"\" \n An integer only implementation of a queue\n \"\"\"\n\n def __init__(self, maxSize):\n \"\"\"\n maxSize is the maximum number of items\n that can be in the queue at any given time\n \"\"\"\n self.front = 0\n self.end = 0\n self.qSize = 0\n self.data = arr('i', (0 for i in range(maxSize)))\n\n def isEmpty(self):\n \"\"\"\n Return true/false on whether the queue is empty\n \"\"\"\n return self.qSize == 0\n\n def size(self):\n \"\"\"\n Return the number of elements inside the queue\n \"\"\"\n return self.qSize\n\n def peek(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.front = self.front % len(self.data)\n return self.data[self.front]\n\n def isFull(self):\n return self.qSize == len(self.data)\n\n def offer(self, value):\n \"\"\"\n Add an element to the queue\n \"\"\"\n if self.isFull():\n raise Exception('Queue too small!')\n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)\n\n def poll(self):\n \"\"\"\n Make sure you check is the queue is not empty before calling poll!\n \"\"\"\n if self.isEmpty():\n raise Exception('Queue is empty')\n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d\n\n\ndef benchMarkTest():\n \"\"\"\n BenchMark IntQueue vs ArrayDeque.\n \"\"\"\n n = 10000000\n intQ = IntQueue(n)\n start = time.process_time()\n for i in range(0, n):\n intQ.offer(i)\n for i in range(0, n):\n intQ.poll()\n end = time.process_time()\n print('IntQueue Time: ', end - start)\n arrayDeque = deque()\n start = time.process_time()\n for i in range(0, n):\n arrayDeque.append(i)\n for i in range(0, n):\n arrayDeque.popleft()\n end = time.process_time()\n print('ArrayDeque Time: ', end - start)\n\n\nif __name__ == '__main__':\n \"\"\"\n Example usage\n \"\"\"\n q = 
IntQueue(5)\n q.offer(1)\n q.offer(2)\n q.offer(3)\n q.offer(4)\n q.offer(5)\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.isEmpty())\n q.offer(1)\n q.offer(2)\n q.offer(3)\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.poll())\n print(q.isEmpty())\n benchMarkTest()\n",
"step-5": "'''\n * @file IntQueue.py\n * @author (original JAVA) William Fiset, william.alexandre.fiset@gmail.com\n * liujingkun, liujkon@gmail.com\n * (conversion to Python) Armin Zare Zadeh, ali.a.zarezadeh@gmail.com\n * @date 23 Jun 2020\n * @version 0.1\n * @brief This file contains an implementation of an integer only queue.\n * \n'''\n\nimport time\nfrom array import array as arr\nfrom collections import deque\nfrom Queue import Queue\n\n\nclass IntQueue(Queue):\n ''' \n An integer only implementation of a queue\n '''\n def __init__(self, maxSize):\n \"\"\"\n maxSize is the maximum number of items\n that can be in the queue at any given time\n \"\"\" \n self.front = 0\n self.end = 0\n self.qSize = 0\n self.data = arr('i', (0 for i in range(maxSize)))\n\n\n def isEmpty(self):\n \"\"\"\n Return true/false on whether the queue is empty\n \"\"\"\n return self.qSize == 0\n\n\n def size(self):\n \"\"\"\n Return the number of elements inside the queue\n \"\"\" \n return self.qSize\n\n\n def peek(self):\n if self.isEmpty():\n raise Exception('Queue is empty')\n \n self.front = self.front % len(self.data)\n return self.data[self.front]\n\n\n def isFull(self):\n return self.qSize == len(self.data)\n\n\n def offer(self, value):\n \"\"\"\n Add an element to the queue\n \"\"\"\n if self.isFull():\n raise Exception(\"Queue too small!\")\n \n self.data[self.end] = value\n self.end += 1\n self.qSize += 1\n self.end = self.end % len(self.data)\n\n\n def poll(self):\n \"\"\"\n Make sure you check is the queue is not empty before calling poll!\n \"\"\"\n if self.isEmpty():\n raise Exception('Queue is empty')\n \n self.qSize -= 1\n self.front = self.front % len(self.data)\n d = self.data[self.front]\n self.front += 1\n return d\n\n\n\ndef benchMarkTest():\n \"\"\"\n BenchMark IntQueue vs ArrayDeque.\n \"\"\" \n\n n = 10000000\n intQ = IntQueue(n)\n\n # IntQueue times at around 12.109375 seconds\n start = time.process_time()\n for i in range(0, n):\n intQ.offer(i)\n for i in 
range(0, n):\n intQ.poll()\n end = time.process_time()\n print(\"IntQueue Time: \", (end - start))\n\n # ArrayDeque times at around 1.1875 seconds\n arrayDeque = deque()\n start = time.process_time()\n for i in range(0, n):\n arrayDeque.append(i)\n for i in range(0, n):\n arrayDeque.popleft()\n end = time.process_time()\n print(\"ArrayDeque Time: \", (end - start))\n\n\n\nif __name__ == '__main__':\n \"\"\"\n Example usage\n \"\"\"\n\n q = IntQueue(5)\n\n q.offer(1)\n q.offer(2)\n q.offer(3)\n q.offer(4)\n q.offer(5)\n\n print(q.poll()) # 1\n print(q.poll()) # 2\n print(q.poll()) # 3\n print(q.poll()) # 4\n\n print(q.isEmpty()) # false\n\n q.offer(1);\n q.offer(2);\n q.offer(3);\n\n print(q.poll()) # 5\n print(q.poll()) # 1\n print(q.poll()) # 2\n print(q.poll()) # 3\n\n print(q.isEmpty()) # true\n\n benchMarkTest()\n",
"step-ids": [
0,
8,
11,
12,
13
]
}
|
[
0,
8,
11,
12,
13
] |
<|reserved_special_token_0|>
def to_image(det):
    """Run the detector over a fixed folder of test images and display boxes.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built a Normalization/Compose transform here
    # that was never used; removed as dead code.
    for i in range(5, 200):
        # NOTE(review): trailing space in the path kept from the original —
        # verify the actual filenames on disk.
        path = f'D:/temp_data/mask/test/{i}.jpg '
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        cv2.waitKey()  # block until a key press before the next image
def to_video(det):
    """Run the detector live on the local webcam feed; press 'q' to quit.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built an unused Normalization transform list
    # here; removed as dead code.
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)  # device 0 via DirectShow (Windows)
    cap.set(3, 1920)  # frame width
    cap.set(4, 1080)  # frame height
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()  # warm-up read
    while True:
        ret, frame = cap.read()
        frame = frame[:, ::-1]      # mirror horizontally
        frame = frame[:, 440:-440]  # center-crop width — assumes 1920px frames, TODO confirm
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 255 == ord('q'):
            break
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(3)
<|reserved_special_token_0|>
def to_image(det):
    """Run the detector over a fixed folder of test images and display boxes.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built a Normalization/Compose transform here
    # that was never used; removed as dead code.
    for i in range(5, 200):
        # NOTE(review): trailing space in the path kept from the original —
        # verify the actual filenames on disk.
        path = f'D:/temp_data/mask/test/{i}.jpg '
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        cv2.waitKey()  # block until a key press before the next image
def to_video(det):
    """Run the detector live on the local webcam feed; press 'q' to quit.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built an unused Normalization transform list
    # here; removed as dead code.
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)  # device 0 via DirectShow (Windows)
    cap.set(3, 1920)  # frame width
    cap.set(4, 1080)  # frame height
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()  # warm-up read
    while True:
        ret, frame = cap.read()
        frame = frame[:, ::-1]      # mirror horizontally
        frame = frame[:, 440:-440]  # center-crop width — assumes 1920px frames, TODO confirm
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 255 == ord('q'):
            break
if __name__ == '__main__':
    # classes_info / model_info come from the config.* star imports.
    det = Detector(classes_info, model_info, 'cuda')
    # NOTE(review): hard-coded checkpoint from a specific training run.
    det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')
    to_video(det)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(3)  # fixed seed so per-class colors are stable across runs
colors = np.random.randint(128, 256, (100, 3))  # one bright color triple per class id
def to_image(det):
    """Run the detector over a fixed folder of test images and display boxes.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built a Normalization/Compose transform here
    # that was never used; removed as dead code.
    for i in range(5, 200):
        # NOTE(review): trailing space in the path kept from the original —
        # verify the actual filenames on disk.
        path = f'D:/temp_data/mask/test/{i}.jpg '
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        cv2.waitKey()  # block until a key press before the next image
def to_video(det):
    """Run the detector live on the local webcam feed; press 'q' to quit.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built an unused Normalization transform list
    # here; removed as dead code.
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)  # device 0 via DirectShow (Windows)
    cap.set(3, 1920)  # frame width
    cap.set(4, 1080)  # frame height
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()  # warm-up read
    while True:
        ret, frame = cap.read()
        frame = frame[:, ::-1]      # mirror horizontally
        frame = frame[:, 440:-440]  # center-crop width — assumes 1920px frames, TODO confirm
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 255 == ord('q'):
            break
if __name__ == '__main__':
    # classes_info / model_info come from the config.* star imports.
    det = Detector(classes_info, model_info, 'cuda')
    # NOTE(review): hard-coded checkpoint from a specific training run.
    det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')
    to_video(det)
<|reserved_special_token_1|>
from core.detector import Detector
from utils.augmentations import *
from torchvision.transforms.transforms import Compose
from config.mask_config import *
from config.train_config import model_info
np.random.seed(3)  # fixed seed so per-class colors are stable across runs
colors = np.random.randint(128, 256, (100, 3))  # one bright color triple per class id
def to_image(det):
    """Run the detector over a fixed folder of test images and display boxes.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built a Normalization/Compose transform here
    # that was never used; removed as dead code.
    for i in range(5, 200):
        # NOTE(review): trailing space in the path kept from the original —
        # verify the actual filenames on disk.
        path = f'D:/temp_data/mask/test/{i}.jpg '
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        cv2.waitKey()  # block until a key press before the next image
def to_video(det):
    """Run the detector live on the local webcam feed; press 'q' to quit.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built an unused Normalization transform list
    # here; removed as dead code.
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)  # device 0 via DirectShow (Windows)
    cap.set(3, 1920)  # frame width
    cap.set(4, 1080)  # frame height
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()  # warm-up read
    while True:
        ret, frame = cap.read()
        frame = frame[:, ::-1]      # mirror horizontally
        frame = frame[:, 440:-440]  # center-crop width — assumes 1920px frames, TODO confirm
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = 'mask' if cid == 1 else 'face'
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 255 == ord('q'):
            break
if __name__ == '__main__':
    # classes_info / model_info come from the config.* star imports.
    det = Detector(classes_info, model_info, 'cuda')
    # NOTE(review): hard-coded checkpoint from a specific training run.
    det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')
    to_video(det)
<|reserved_special_token_1|>
from core.detector import Detector
from utils.augmentations import *
from torchvision.transforms.transforms import Compose
from config.mask_config import *
from config.train_config import model_info
np.random.seed(3)  # fixed seed so per-class colors are stable across runs
colors = np.random.randint(128, 256, (100, 3))  # one bright color triple per class id
def to_image(det):
    """Run the detector over a fixed folder of test images and display boxes.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built a Normalization/Compose transform here
    # that was never used; removed as dead code.
    for i in range(5, 200):
        # NOTE(review): trailing space in the path kept from the original —
        # verify the actual filenames on disk.
        path = f"D:/temp_data/mask/test/{i}.jpg "
        print(path)
        image = cv2.imread(path)
        image = cv2.resize(image, (size, size))
        bboxes = det.predict(image.copy(), size, (0.2, 0.2))
        for cid, bbox in bboxes[0].items():
            cls = "mask" if cid == 1 else "face"
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, "{}:{}".format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow("image", image)
        cv2.waitKey()  # block until a key press before the next image
def to_video(det):
    """Run the detector live on the local webcam feed; press 'q' to quit.

    det: a loaded Detector.  Assumes class id 1 means "mask", anything else
    "face" — TODO confirm against classes_info.
    """
    size = 512
    # NOTE(review): the original built an unused Normalization transform list
    # here; removed as dead code.
    # 0 = local camera via DirectShow (Windows); a URL would select a network
    # camera and a file path a local video (translated from original comment).
    cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
    cap.set(3, 1920)  # set resolution: frame width
    cap.set(4, 1080)  # frame height
    cap.set(cv2.CAP_PROP_FPS, 30)
    ret, frame = cap.read()  # warm-up read
    while (True):
        ret, frame = cap.read()
        frame = frame[:, ::-1]       # mirror horizontally
        frame = frame[:, 440: -440]  # center-crop width — assumes 1920px frames, TODO confirm
        image = cv2.resize(frame, (size, size))
        bboxes = det.predict(image.copy(), size, (0.5, 0.5))
        for cid, bbox in bboxes[0].items():
            cls = "mask" if cid == 1 else "face"
            for b in bbox:
                prob = b[-1]          # last element is the confidence score
                b = b[:4].astype(int)  # first four are x1, y1, x2, y2
                cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]),
                              colors[cid].tolist(), 1, cv2.LINE_AA)
                cv2.putText(image, "{}:{}".format(cls, int(prob * 100)),
                            (b[0], b[1]), cv2.FONT_ITALIC, 1,
                            colors[cid].tolist(), 2)
        cv2.imshow("image", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
if __name__ == '__main__':
    # classes_info / model_info come from the config.* star imports.
    det = Detector(classes_info, model_info, "cuda")
    # NOTE(review): hard-coded checkpoint from a specific training run.
    det.load_model("checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth")
    # to_image(det)
    to_video(det)
|
flexible
|
{
"blob_id": "97e7ca02d85267492a0dcbbda9d8754a0a3735a5",
"index": 5315,
"step-1": "<mask token>\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(3)\n<mask token>\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-3": "<mask token>\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-4": "from core.detector import Detector\nfrom utils.augmentations import *\nfrom torchvision.transforms.transforms import Compose\nfrom config.mask_config import *\nfrom config.train_config import model_info\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 
00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-5": "from core.detector import Detector\nfrom utils.augmentations import *\nfrom torchvision.transforms.transforms import Compose\nfrom config.mask_config import *\nfrom config.train_config import model_info\n\n\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n\n path = f\"D:/temp_data/mask/test/{i}.jpg \"\n print(path)\n image = cv2.imread(path)\n\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n\n for cid, bbox in bboxes[0].items():\n cls = \"mask\" if cid == 1 else \"face\"\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, \"{}:{}\".format(cls, int(prob*100)), (b[0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow(\"image\", image)\n cv2.waitKey()\n\ndef to_video(det):\n size = 512\n\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW) # 参数为0时调用本地摄像头;url连接调取网络摄像头;文件地址获取本地视频\n cap.set(3, 1920) # 设置分辨率\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while (True):\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440: -440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n\n for cid, bbox in bboxes[0].items():\n cls = \"mask\" if cid == 1 else \"face\"\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, \"{}:{}\".format(cls, int(prob * 100)), (b[0], b[1]), cv2.FONT_ITALIC, 1,\n colors[cid].tolist(), 2)\n cv2.imshow(\"image\", image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nif __name__ == '__main__':\n det = 
Detector(classes_info, model_info, \"cuda\")\n det.load_model(\"checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth\")\n # to_image(det)\n to_video(det)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python
import sys
import os
class ParseError(Exception):
pass
def remove_inline_comments(text):
ret = []
in_comment_block = False
p = 0
while True:
if (op := text.find('/*', p)) > 0:
in_comment_block = True
if op != p:
ret.append(text[p:op])
p = op + 2
else:
ret.append(text[p:])
break
if (op := text.find('*/', p)) > 0:
p = op + 2
in_comment_block = False
continue
else:
break
if in_comment_block:
exit(2)
return ''.join(ret)
def remove_comments(contents):
ret = []
for l in contents:
lstrip = l.strip()
if lstrip.startswith('//'):
continue
if (com := lstrip.find('//')) > 0:
ret.append(l[0:com])
continue
ret.append(l)
return ret
class AIns:
def __init__(self, token, symbols):
if token.isdecimal():
self.value = int(token)
if self.value > 2**15-1:
raise ParseError("A instruction value is too high")
elif token in symbols.symbols:
self.value = symbols.symbols[token]
else:
symbols.add(token)
self.value = symbols.symbols[token]
def get_binary(self):
return "0{:015b}".format(self.value)
class CIns:
comp = {
'0': '101010',
'1': '111111',
'-1': '111010',
'D': '001100',
'A': '110000',
'M': '110000',
'!D': '001101',
'!A': '110001',
'!M': '110001',
'-D': '001111',
'-A': '110011',
'-M': '110011',
'D+1': '011111',
'A+1': '110111',
'M+1': '110111',
'D-1': '001110',
'A-1': '110010',
'M-1': '110010',
'D+A': '000010',
'D+M': '000010',
'D-A': '010011',
'D-M': '010011',
'A-D': '000111',
'M-D': '000111',
'D&A': '000000',
'D&M': '000000',
'D|A': '010101',
'D|M': '010101',
}
jmp = {
'JGT': '001',
'JEQ': '010',
'JGE': '011',
'JLT': '100',
'JNE': '101',
'JLE': '110',
'JMP': '111',
}
def __init__(self, token):
self.raw_instruction = token
token = token.replace(' ', '')
self.dest = ''
self.comp = ''
self.jmp = ''
if '=' in token:
self.dest, token = token.split('=', 1)
if ';' in token:
self.comp, self.jmp = token.split(';', 1)
else:
self.comp = token
def get_binary(self):
head = '111'
a='0'
comp = '000000'
dst = ['0', '0', '0']
jmp = '000'
if self.dest:
if len(self.dest) > 3:
raise ParseError('Wrong dest length')
if 'A' in self.dest:
dst[0] = '1'
if 'D' in self.dest:
dst[1] = '1'
if 'M' in self.dest:
dst[2] = '1'
if self.jmp:
try:
jmp = CIns.jmp[self.jmp]
except KeyError:
raise ParseError('Wrong jmp instruction')
try:
comp = CIns.comp[self.comp]
except KeyError:
raise ParseError("Wrong comp instruction")
if 'M' in self.comp:
a = '1'
ret = "{}{}{}{}{}".format(head, a, comp, ''.join(dst), jmp)
if len(ret) > 16:
raise ParseError("CInstruction binary contruction error, command was '{}'".format(self.raw_instruction))
return ret
def parse(contents, symbols):
ret = []
for l in contents:
ls = l.strip()
if ls.startswith('@'):
ret.append(AIns(ls[1:], symbols))
else:
ret.append(CIns(ls))
return ret
class Symbols:
def __init__(self):
self.memptr = 16
self.symbols = {
'R0': 0,
'R1': 1,
'R2': 2,
'R3': 3,
'R4': 4,
'R5': 5,
'R6': 6,
'R7': 7,
'R8': 8,
'R9': 9,
'R10': 10,
'R11': 11,
'R12': 12,
'R13': 13,
'R14': 14,
'R15': 15,
'SCREEN': 16384,
'KBD': 24576,
'SP': 0,
'LCL': 1,
'ARG': 2,
'THIS': 3,
'THAT': 4,
}
def fill_with_labels(self, contents):
ret = []
pos = 0
for l in contents:
ls = l.strip()
if ls.startswith('(') and ls.endswith(')'):
label = ls[1:-1]
if label in self.symbols:
raise ParseError('Label redefinition')
else:
self.symbols[label] = pos
else:
ret.append(l)
pos += 1
return ret
def add(self, symbol):
if symbol in self.symbols:
raise ParseError('Variable redefinition')
self.symbols[symbol] = self.memptr
self.memptr += 1
def main():
if len(sys.argv) < 1:
exit(1)
filename = sys.argv[1]
contents = []
with open(filename) as f:
text = f.read()
contents = (remove_inline_comments(text)).split('\n')
contents = filter(None, remove_comments(contents))
symbols = Symbols()
contents = symbols.fill_with_labels(contents)
parsed = parse(contents, symbols)
out_filename = "{}.hack".format(os.path.splitext(filename)[0])
with open(out_filename, 'w') as f:
for i in parsed:
try:
f.write("{}\n".format(i.get_binary()))
except ParseError as e:
print(e)
exit(1)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "11e9e4dd5c9c6158fed40080d4cc221f28a0eba0",
"index": 8097,
"step-1": "<mask token>\n\n\nclass AIns:\n <mask token>\n <mask token>\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\n<mask token>\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 
'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\n<mask token>\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n return ret\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n if in_comment_block:\n exit(2)\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n ret.append(l)\n return ret\n\n\nclass AIns:\n\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2 ** 15 - 1:\n raise ParseError('A instruction value is too high')\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return '0{:015b}'.format(self.value)\n\n\nclass CIns:\n comp = {'0': '101010', '1': '111111', '-1': '111010', 'D': '001100',\n 'A': '110000', 'M': '110000', '!D': '001101', '!A': '110001', '!M':\n '110001', '-D': '001111', '-A': '110011', '-M': '110011', 'D+1':\n '011111', 'A+1': '110111', 'M+1': '110111', 'D-1': '001110', 'A-1':\n '110010', 'M-1': '110010', 'D+A': '000010', 'D+M': '000010', 'D-A':\n '010011', 'D-M': '010011', 'A-D': '000111', 'M-D': '000111', 'D&A':\n '000000', 'D&M': '000000', 'D|A': '010101', 'D|M': '010101'}\n jmp = {'JGT': '001', 'JEQ': '010', 'JGE': '011', 'JLT': '100', 'JNE':\n '101', 'JLE': '110', 'JMP': '111'}\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = 
token\n\n def get_binary(self):\n head = '111'\n a = '0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError('Wrong comp instruction')\n if 'M' in self.comp:\n a = '1'\n ret = '{}{}{}{}{}'.format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\n \"CInstruction binary contruction error, command was '{}'\".\n format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n return ret\n\n\nclass Symbols:\n\n def __init__(self):\n self.memptr = 16\n self.symbols = {'R0': 0, 'R1': 1, 'R2': 2, 'R3': 3, 'R4': 4, 'R5': \n 5, 'R6': 6, 'R7': 7, 'R8': 8, 'R9': 9, 'R10': 10, 'R11': 11,\n 'R12': 12, 'R13': 13, 'R14': 14, 'R15': 15, 'SCREEN': 16384,\n 'KBD': 24576, 'SP': 0, 'LCL': 1, 'ARG': 2, 'THIS': 3, 'THAT': 4}\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\ndef main():\n if len(sys.argv) < 1:\n exit(1)\n filename = sys.argv[1]\n contents = []\n with open(filename) as f:\n text = f.read()\n contents = remove_inline_comments(text).split('\\n')\n contents = filter(None, 
remove_comments(contents))\n symbols = Symbols()\n contents = symbols.fill_with_labels(contents)\n parsed = parse(contents, symbols)\n out_filename = '{}.hack'.format(os.path.splitext(filename)[0])\n with open(out_filename, 'w') as f:\n for i in parsed:\n try:\n f.write('{}\\n'.format(i.get_binary()))\n except ParseError as e:\n print(e)\n exit(1)\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python\n\nimport sys\nimport os\n\n\nclass ParseError(Exception):\n pass\n\n\ndef remove_inline_comments(text):\n ret = []\n in_comment_block = False\n p = 0\n while True:\n if (op := text.find('/*', p)) > 0:\n in_comment_block = True\n if op != p:\n ret.append(text[p:op])\n p = op + 2\n else:\n ret.append(text[p:])\n break\n\n if (op := text.find('*/', p)) > 0:\n p = op + 2\n in_comment_block = False\n continue\n else:\n break\n\n if in_comment_block:\n exit(2)\n\n return ''.join(ret)\n\n\ndef remove_comments(contents):\n ret = []\n for l in contents:\n lstrip = l.strip()\n if lstrip.startswith('//'):\n continue\n if (com := lstrip.find('//')) > 0:\n ret.append(l[0:com])\n continue\n\n ret.append(l)\n\n return ret\n\n\nclass AIns:\n def __init__(self, token, symbols):\n if token.isdecimal():\n self.value = int(token)\n if self.value > 2**15-1:\n raise ParseError(\"A instruction value is too high\")\n\n elif token in symbols.symbols:\n self.value = symbols.symbols[token]\n else:\n symbols.add(token)\n self.value = symbols.symbols[token]\n\n def get_binary(self):\n return \"0{:015b}\".format(self.value)\n\n\nclass CIns:\n\n comp = {\n '0': '101010',\n '1': '111111',\n '-1': '111010',\n 'D': '001100',\n 'A': '110000',\n 'M': '110000',\n '!D': '001101',\n '!A': '110001',\n '!M': '110001',\n '-D': '001111',\n '-A': '110011',\n '-M': '110011',\n 'D+1': '011111',\n 'A+1': '110111',\n 'M+1': '110111',\n 'D-1': '001110',\n 'A-1': '110010',\n 'M-1': '110010',\n 'D+A': '000010',\n 'D+M': '000010',\n 'D-A': '010011',\n 'D-M': '010011',\n 'A-D': '000111',\n 'M-D': '000111',\n 'D&A': '000000',\n 'D&M': '000000',\n 'D|A': '010101',\n 'D|M': '010101',\n }\n\n jmp = {\n 'JGT': '001',\n 'JEQ': '010',\n 'JGE': '011',\n 'JLT': '100',\n 'JNE': '101',\n 'JLE': '110',\n 'JMP': '111',\n }\n\n def __init__(self, token):\n self.raw_instruction = token\n token = token.replace(' ', '')\n self.dest = ''\n self.comp = ''\n self.jmp = ''\n if '=' in token:\n self.dest, 
token = token.split('=', 1)\n if ';' in token:\n self.comp, self.jmp = token.split(';', 1)\n else:\n self.comp = token\n\n def get_binary(self):\n head = '111'\n a='0'\n comp = '000000'\n dst = ['0', '0', '0']\n jmp = '000'\n\n if self.dest:\n if len(self.dest) > 3:\n raise ParseError('Wrong dest length')\n if 'A' in self.dest:\n dst[0] = '1'\n if 'D' in self.dest:\n dst[1] = '1'\n if 'M' in self.dest:\n dst[2] = '1'\n\n if self.jmp:\n try:\n jmp = CIns.jmp[self.jmp]\n except KeyError:\n raise ParseError('Wrong jmp instruction')\n\n try:\n comp = CIns.comp[self.comp]\n except KeyError:\n raise ParseError(\"Wrong comp instruction\")\n\n if 'M' in self.comp:\n a = '1'\n\n ret = \"{}{}{}{}{}\".format(head, a, comp, ''.join(dst), jmp)\n if len(ret) > 16:\n raise ParseError(\"CInstruction binary contruction error, command was '{}'\".format(self.raw_instruction))\n return ret\n\n\ndef parse(contents, symbols):\n ret = []\n for l in contents:\n ls = l.strip()\n if ls.startswith('@'):\n ret.append(AIns(ls[1:], symbols))\n else:\n ret.append(CIns(ls))\n\n return ret\n\n\nclass Symbols:\n def __init__(self):\n self.memptr = 16\n self.symbols = {\n 'R0': 0,\n 'R1': 1,\n 'R2': 2,\n 'R3': 3,\n 'R4': 4,\n 'R5': 5,\n 'R6': 6,\n 'R7': 7,\n 'R8': 8,\n 'R9': 9,\n 'R10': 10,\n 'R11': 11,\n 'R12': 12,\n 'R13': 13,\n 'R14': 14,\n 'R15': 15,\n 'SCREEN': 16384,\n 'KBD': 24576,\n 'SP': 0,\n 'LCL': 1,\n 'ARG': 2,\n 'THIS': 3,\n 'THAT': 4,\n }\n\n def fill_with_labels(self, contents):\n ret = []\n pos = 0\n for l in contents:\n ls = l.strip()\n if ls.startswith('(') and ls.endswith(')'):\n label = ls[1:-1]\n if label in self.symbols:\n raise ParseError('Label redefinition')\n else:\n self.symbols[label] = pos\n else:\n ret.append(l)\n pos += 1\n\n return ret\n\n def add(self, symbol):\n if symbol in self.symbols:\n raise ParseError('Variable redefinition')\n self.symbols[symbol] = self.memptr\n self.memptr += 1\n\n\ndef main():\n if len(sys.argv) < 1:\n exit(1)\n\n filename = 
sys.argv[1]\n\n contents = []\n with open(filename) as f:\n text = f.read()\n\n contents = (remove_inline_comments(text)).split('\\n')\n\n contents = filter(None, remove_comments(contents))\n\n symbols = Symbols()\n contents = symbols.fill_with_labels(contents)\n\n parsed = parse(contents, symbols)\n\n out_filename = \"{}.hack\".format(os.path.splitext(filename)[0])\n\n with open(out_filename, 'w') as f:\n for i in parsed:\n try:\n f.write(\"{}\\n\".format(i.get_binary()))\n except ParseError as e:\n print(e)\n exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
9,
14,
15,
16,
19
]
}
|
[
9,
14,
15,
16,
19
] |
<|reserved_special_token_0|>
class Templating(templating.Templating):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Templating(templating.Templating):
"""
Application-specific templating implementation.
Overriding "args" methods makes it trivial to push extra, application-wide
data to the templates without any assistance from the resource.
"""
def __init__(self, app_conf):
renderer = make_renderer(app_conf)
templating.Templating.__init__(self, renderer)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Templating(templating.Templating):
"""
Application-specific templating implementation.
Overriding "args" methods makes it trivial to push extra, application-wide
data to the templates without any assistance from the resource.
"""
def __init__(self, app_conf):
renderer = make_renderer(app_conf)
templating.Templating.__init__(self, renderer)
def make_renderer(app_conf):
"""
Create and return a restish.templating "renderer".
"""
import pkg_resources
import os.path
from restish.contrib.makorenderer import MakoRenderer
return MakoRenderer(directories=[pkg_resources.resource_filename(
'example', 'templates'), pkg_resources.resource_filename('formish',
'templates/mako'), pkg_resources.resource_filename('adminish',
'templates')], module_directory=os.path.join(app_conf['cache_dir'],
'templates'), input_encoding='utf-8', output_encoding='utf-8',
default_filters=['unicode', 'h'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from restish import templating
class Templating(templating.Templating):
"""
Application-specific templating implementation.
Overriding "args" methods makes it trivial to push extra, application-wide
data to the templates without any assistance from the resource.
"""
def __init__(self, app_conf):
renderer = make_renderer(app_conf)
templating.Templating.__init__(self, renderer)
def make_renderer(app_conf):
"""
Create and return a restish.templating "renderer".
"""
import pkg_resources
import os.path
from restish.contrib.makorenderer import MakoRenderer
return MakoRenderer(directories=[pkg_resources.resource_filename(
'example', 'templates'), pkg_resources.resource_filename('formish',
'templates/mako'), pkg_resources.resource_filename('adminish',
'templates')], module_directory=os.path.join(app_conf['cache_dir'],
'templates'), input_encoding='utf-8', output_encoding='utf-8',
default_filters=['unicode', 'h'])
<|reserved_special_token_1|>
"""
Templating support library and renderer configuration.
"""
from restish import templating
class Templating(templating.Templating):
"""
Application-specific templating implementation.
Overriding "args" methods makes it trivial to push extra, application-wide
data to the templates without any assistance from the resource.
"""
def __init__(self, app_conf):
renderer = make_renderer(app_conf)
templating.Templating.__init__(self, renderer)
def make_renderer(app_conf):
"""
Create and return a restish.templating "renderer".
"""
# Uncomment for an example of Mako templating support.
import pkg_resources
import os.path
from restish.contrib.makorenderer import MakoRenderer
return MakoRenderer(
directories=[
pkg_resources.resource_filename('example', 'templates'),
pkg_resources.resource_filename('formish', 'templates/mako'),
pkg_resources.resource_filename('adminish', 'templates'),
],
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', output_encoding='utf-8',
default_filters=['unicode', 'h']
)
|
flexible
|
{
"blob_id": "18391df9a3e52400fe4fc54d6381b9ce21e25f0b",
"index": 2296,
"step-1": "<mask token>\n\n\nclass Templating(templating.Templating):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(directories=[pkg_resources.resource_filename(\n 'example', 'templates'), pkg_resources.resource_filename('formish',\n 'templates/mako'), pkg_resources.resource_filename('adminish',\n 'templates')], module_directory=os.path.join(app_conf['cache_dir'],\n 'templates'), input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h'])\n",
"step-4": "<mask token>\nfrom restish import templating\n\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(directories=[pkg_resources.resource_filename(\n 'example', 'templates'), pkg_resources.resource_filename('formish',\n 'templates/mako'), pkg_resources.resource_filename('adminish',\n 'templates')], module_directory=os.path.join(app_conf['cache_dir'],\n 'templates'), input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h'])\n",
"step-5": "\"\"\"\nTemplating support library and renderer configuration.\n\"\"\"\n\nfrom restish import templating\n\nclass Templating(templating.Templating):\n \"\"\"\n Application-specific templating implementation.\n\n Overriding \"args\" methods makes it trivial to push extra, application-wide\n data to the templates without any assistance from the resource.\n \"\"\"\n\n def __init__(self, app_conf):\n renderer = make_renderer(app_conf)\n templating.Templating.__init__(self, renderer)\n\n\ndef make_renderer(app_conf):\n \"\"\"\n Create and return a restish.templating \"renderer\".\n \"\"\"\n\n # Uncomment for an example of Mako templating support.\n import pkg_resources\n import os.path\n from restish.contrib.makorenderer import MakoRenderer\n return MakoRenderer(\n directories=[\n pkg_resources.resource_filename('example', 'templates'),\n pkg_resources.resource_filename('formish', 'templates/mako'),\n pkg_resources.resource_filename('adminish', 'templates'),\n ],\n module_directory=os.path.join(app_conf['cache_dir'], 'templates'),\n input_encoding='utf-8', output_encoding='utf-8',\n default_filters=['unicode', 'h']\n )\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
"""Functional tests for h2 frames."""
__author__ = "Tempesta Technologies, Inc."
__copyright__ = "Copyright (C) 2023 Tempesta Technologies, Inc."
__license__ = "GPL2"
from h2.errors import ErrorCodes
from h2.exceptions import StreamClosedError
from framework import deproxy_client, tester
from helpers import checks_for_tests as checks
from http2_general.helpers import H2Base
from helpers.networker import NetWorker
from hpack import HeaderTuple
class TestH2Frame(H2Base):
    """
    Functional tests for Tempesta's handling of individual HTTP/2 frame
    types: DATA framing, SETTINGS, WINDOW_UPDATE, CONTINUATION, RST_STREAM,
    GOAWAY and repeated HEADERS. Each test drives a deproxy h2 client
    against Tempesta and checks the observable connection/stream state.
    """

    def test_data_framing(self):
        """Send many 1 byte frames in request."""
        self.start_all_services()
        deproxy_cl = self.get_client("deproxy")
        deproxy_cl.parsing = False
        request_body = "x" * 100
        # Open the stream with HEADERS only, then send the body one byte
        # (one DATA frame) at a time; only the final frame ends the stream.
        deproxy_cl.make_request(request=self.post_request, end_stream=False)
        for byte in request_body[:-1]:
            deproxy_cl.make_request(request=byte, end_stream=False)
        deproxy_cl.make_request(request=request_body[-1], end_stream=True)
        self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)

    def test_empty_last_data_frame(self):
        """
        Send request with empty last data frame. It is valid request. RFC 9113 6.9.1.
        """
        self.start_all_services()
        deproxy_cl = self.get_client("deproxy")
        deproxy_cl.parsing = False
        request_body = "123"
        deproxy_cl.make_request(request=self.post_request, end_stream=False)
        deproxy_cl.make_request(request=request_body, end_stream=False)
        # Zero-length DATA frame carrying only the END_STREAM flag.
        deproxy_cl.make_request(request="", end_stream=True)
        self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)

    def test_empty_data_frame(self):
        """
        Send request with empty data frame. It is valid request. RFC 9113 10.5.
        """
        self.start_all_services()
        deproxy_cl = self.get_client("deproxy")
        deproxy_cl.parsing = False
        request_body = "123"
        deproxy_cl.make_request(request=self.post_request, end_stream=False)
        # Zero-length DATA frame in the middle of the stream.
        deproxy_cl.make_request(request="", end_stream=False)
        deproxy_cl.make_request(request=request_body, end_stream=True)
        self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)

    def test_settings_frame(self):
        """
        Create tls connection and send preamble + correct settings frame.
        Tempesta must accept settings and return settings + ack settings frames.
        Then client send ack settings frame and Tempesta must correctly accept it.
        """
        self.start_all_services(client=True)
        client: deproxy_client.DeproxyClientH2 = self.get_client("deproxy")
        # initiate_connection() generates preamble + settings frame with default variables
        self.initiate_h2_connection(client)
        # send empty setting frame with ack flag.
        client.send_bytes(client.h2_connection.data_to_send())
        client.h2_connection.clear_outbound_data_buffer()
        # send header frame after exchanging settings and make sure
        # that connection is open.
        client.send_request(self.post_request, "200")

    def test_window_update_frame(self):
        """Tempesta must handle WindowUpdate frame."""
        self.start_all_services(client=True)
        client: deproxy_client.DeproxyClientH2 = self.get_client("deproxy")
        # add preamble + settings frame with SETTING_INITIAL_WINDOW_SIZE = 65535
        client.update_initial_settings()
        # send preamble + settings frame
        client.send_bytes(client.h2_connection.data_to_send())
        client.h2_connection.clear_outbound_data_buffer()
        self.assertTrue(client.wait_for_ack_settings())
        # send WindowUpdate frame with window size increment = 5000
        client.h2_connection.increment_flow_control_window(5000)
        client.send_bytes(client.h2_connection.data_to_send())
        client.h2_connection.clear_outbound_data_buffer()
        # send header frame after sending WindowUpdate and make sure
        # that connection is working correctly.
        client.send_request(self.get_request, "200")
        self.assertFalse(client.connection_is_closed())

    def test_continuation_frame(self):
        """Tempesta must handle CONTINUATION frame."""
        self.start_all_services()
        client: deproxy_client.DeproxyClientH2 = self.get_client("deproxy")
        client.update_initial_settings()
        client.send_bytes(client.h2_connection.data_to_send())
        client.h2_connection.clear_outbound_data_buffer()
        # H2Connection separates headers to HEADERS + CONTINUATION frames
        # if they are larger than 16384 bytes
        client.send_request(
            request=self.get_request + [("qwerty", "x" * 5000) for _ in range(4)],
            expected_status_code="200",
        )
        self.assertFalse(client.connection_is_closed())

    def test_rst_frame_in_request(self):
        """
        Tempesta must handle RST_STREAM frame and close stream but other streams MUST work.
        """
        client = self.get_client("deproxy")
        self.start_all_services()
        self.initiate_h2_connection(client)
        # client opens streams with id 1, 3 and does not close them
        client.make_request(request=self.post_request, end_stream=False)
        client.stream_id = 3
        client.make_request(request=self.post_request, end_stream=False)
        # client send RST_STREAM frame with NO_ERROR code in stream 1 and
        # Tempesta closes it for itself.
        client.h2_connection.reset_stream(stream_id=1, error_code=0)
        client.send_bytes(client.h2_connection.data_to_send())
        # Client send DATA frame in stream 3 and it MUST receive response
        client.send_request("qwe", "200")
        # Tempesta allows creating new streams.
        client.stream_id = 5
        client.send_request(self.post_request, "200")
        self.assertFalse(
            client.connection_is_closed(), "Tempesta closed connection after receiving RST_STREAM."
        )

    def test_rst_frame_in_response(self):
        """
        When Tempesta returns RST_STREAM:
            - open streams must not be closed;
            - new streams must be accepted.
        """
        client = self.get_client("deproxy")
        client.parsing = False
        self.start_all_services()
        self.initiate_h2_connection(client)
        # client opens stream with id 1 and does not close it
        client.make_request(request=self.post_request, end_stream=False)
        # client send invalid request and Tempesta returns RST_STREAM
        stream_with_rst = 3
        client.stream_id = stream_with_rst
        client.send_request(self.get_request + [("x-forwarded-for", "1.1.1.1.1.1")], "400")
        # client open new stream
        client.make_request(self.get_request, end_stream=True)
        client.wait_for_response(3)
        # client send DATA frame in stream 1 and it must be open.
        client.stream_id = 1
        client.make_request("body", end_stream=True)
        client.wait_for_response(3)
        # The reset stream must be closed on the client side as well.
        self.assertRaises(
            StreamClosedError, client.h2_connection._get_stream_by_id, stream_with_rst
        )
        self.assertFalse(
            client.connection_is_closed(), "Tempesta closed connection after sending RST_STREAM."
        )

    def test_rst_stream_with_id_0(self):
        """
        RST_STREAM frames MUST be associated with a stream. If a RST_STREAM frame
        is received with a stream identifier of 0x00, the recipient MUST treat this
        as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
        RFC 9113 6.4
        """
        client = self.get_client("deproxy")
        self.start_all_services()
        self.initiate_h2_connection(client)
        # send RST_STREAM with id 0
        # (hand-encoded frame: length 4, type 0x03, flags 0, stream id 0)
        client.send_bytes(b"\x00\x00\x04\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00")
        self.assertTrue(
            client.wait_for_connection_close(1),
            "Tempesta did not close connection after receiving RST_STREAM with id 0.",
        )
        self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)

    def test_goaway_frame_in_response(self):
        """
        Tempesta must:
            - close all streams for connection error (GOAWAY);
            - return last_stream_id.
        There is an inherent race condition between an endpoint starting new streams
        and the remote peer sending a GOAWAY frame. To deal with this case, the GOAWAY
        contains the stream identifier of the last peer-initiated stream that was or
        might be processed on the sending endpoint in this connection. For instance,
        if the server sends a GOAWAY frame, the identified stream is the highest-numbered
        stream initiated by the client.
        RFC 9113 6.8
        """
        client = self.get_client("deproxy")
        self.start_all_services()
        self.initiate_h2_connection(client)
        # Client opens many streams and does not close them
        for stream_id in range(1, 6, 2):
            client.stream_id = stream_id
            client.make_request(request=self.post_request, end_stream=False)
        # Client send DATA frame with stream id 0.
        # Tempesta MUST return GOAWAY frame with PROTOCOL_ERROR
        client.send_bytes(b"\x00\x00\x03\x00\x01\x00\x00\x00\x00asd")
        self.assertTrue(client.wait_for_connection_close(3), "Tempesta did not send GOAWAY frame.")
        self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)
        # `stream_id` still holds the highest id opened by the loop above.
        self.assertEqual(
            client.last_stream_id,
            stream_id,
            "Tempesta returned invalid last_stream_id in GOAWAY frame.",
        )

    def test_goaway_frame_in_request(self):
        """
        Tempesta must not close connection after receiving GOAWAY frame.
        GOAWAY allows an endpoint to gracefully stop accepting new streams while still
        finishing processing of previously established streams.
        RFC 9113 6.8
        """
        client = self.get_client("deproxy")
        self.start_all_services()
        self.initiate_h2_connection(client)
        # Client opens many streams and does not close them
        for stream_id in range(1, 6, 2):
            client.stream_id = stream_id
            client.make_request(request=self.post_request, end_stream=False)
        # Client send GOAWAY frame with PROTOCOL_ERROR as bytes
        # because `_terminate_connection` method changes state machine to closed
        client.send_bytes(b"\x00\x00\x08\x07\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x01")
        # Client sends frames in already open streams.
        # Tempesta must handle these frames and must not close streams,
        # because sender closes connection, but not receiver.
        for stream_id in range(1, 6, 2):
            client.stream_id = stream_id
            client.make_request(request="asd", end_stream=True)
        self.assertTrue(
            client.wait_for_response(), "Tempesta closed connection after receiving GOAWAY frame."
        )

    def test_double_header_frame_in_single_stream(self):
        """
        A second HEADERS frame in the same stream must make Tempesta close
        the connection with PROTOCOL_ERROR (asserted below).
        """
        client = self.get_client("deproxy")
        self.start_all_services()
        self.initiate_h2_connection(client)
        client.make_request(self.post_request, end_stream=False)
        client.make_request([("header1", "header value1")], end_stream=True)
        self.assertTrue(client.wait_for_connection_close())
        self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)

    def __assert_test(self, client, request_body: str, request_number: int):
        """
        Common post-conditions: the client got a 200, the backend saw exactly
        `request_number` requests, Tempesta's counters agree, and the forwarded
        request (method/host/uri/body) matches what the client sent.
        """
        server = self.get_server("deproxy")
        self.assertTrue(client.wait_for_response(timeout=5))
        self.assertEqual(client.last_response.status, "200")
        self.assertEqual(len(server.requests), request_number)
        checks.check_tempesta_request_and_response_stats(
            tempesta=self.get_tempesta(),
            cl_msg_received=request_number,
            cl_msg_forwarded=request_number,
            srv_msg_received=request_number,
            srv_msg_forwarded=request_number,
        )
        error_msg = "Malformed request from Tempesta."
        # post_request layout (from H2Base): [authority, path, scheme, method].
        self.assertEqual(server.last_request.method, self.post_request[3][1], error_msg)
        self.assertEqual(server.last_request.headers["host"], self.post_request[0][1], error_msg)
        self.assertEqual(server.last_request.uri, self.post_request[1][1], error_msg)
        self.assertEqual(server.last_request.body, request_body)
class TestH2FrameEnabledDisabledTsoGroGsoBase(H2Base):
    """Shared fixture for the frame tests that toggle TSO/GRO/GSO offloads."""

    def setup_tests(self):
        """Start all services, complete the h2 settings exchange, and
        return the ``(client, server)`` pair ready for use."""
        self.start_all_services()
        cl = self.get_client("deproxy")
        srv = self.get_server("deproxy")
        # Advertise a small HPACK dynamic table, flush the preamble +
        # SETTINGS bytes, then wait until Tempesta acknowledges them.
        cl.update_initial_settings(header_table_size=512)
        cl.send_bytes(cl.h2_connection.data_to_send())
        cl.wait_for_ack_settings()
        return cl, srv
DEFAULT_MTU = 1500  # standard Ethernet MTU (bytes), used for the offload test runs
class TestH2FrameEnabledDisabledTsoGroGso(TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):
    """
    Runs HEADERS/DATA frame scenarios twice — with NIC TSO/GRO/GSO offloads
    disabled and enabled (via NetWorker) — at DEFAULT_MTU.
    """

    def test_headers_frame_with_continuation(self):
        """Response HEADERS large enough to require CONTINUATION frames."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_with_continuation, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_with_continuation, DEFAULT_MTU
        )

    def test_headers_frame_without_continuation(self):
        """Response HEADERS small enough to fit a single frame."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_without_continuation, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_without_continuation, DEFAULT_MTU
        )

    def test_data_frame(self):
        """Large response body split across many DATA frames."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(client, server, self._test_data_frame, DEFAULT_MTU)
        self.run_test_tso_gro_gso_enabled(client, server, self._test_data_frame, DEFAULT_MTU)

    def test_headers_frame_for_local_resp_invalid_req_d(self):
        """Locally generated (403) response with offloads disabled."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU
        )

    def test_headers_frame_for_local_resp_invalid_req_e(self):
        """Locally generated (403) response with offloads enabled."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU
        )

    def _test_headers_frame_for_local_resp_invalid_req(self, client, server):
        # "bad.com" is blocked by the http_chain config, so Tempesta answers
        # locally with 403 instead of forwarding to the backend.
        client.send_request(
            request=[
                HeaderTuple(":authority", "bad.com"),
                HeaderTuple(":path", "/"),
                HeaderTuple(":scheme", "https"),
                HeaderTuple(":method", "GET"),
            ],
            expected_status_code="403",
        )

    def _test_data_frame(self, client, server):
        # 50 KB header + 100 KB body.
        self._test_headers_data_frames(client, server, 50000, 100000)

    def _test_headers_frame_with_continuation(self, client, server):
        # 50 KB header forces HEADERS + CONTINUATION; no body.
        self._test_headers_data_frames(client, server, 50000, 0)

    def _test_headers_frame_without_continuation(self, client, server):
        # 1 KB header fits in a single HEADERS frame; no body.
        self._test_headers_data_frames(client, server, 1000, 0)

    def _test_headers_data_frames(self, client, server, header_len, body_len):
        """Backend replies with one `header_len`-byte header and a
        `body_len`-byte body; verify the client receives both intact."""
        header = ("qwerty", "x" * header_len)
        server.set_response(
            "HTTP/1.1 200 OK\r\n" + "Date: test\r\n" + "Server: debian\r\n"
            f"{header[0]}: {header[1]}\r\n"
            + f"Content-Length: {body_len}\r\n\r\n"
            + ("x" * body_len)
        )
        client.make_request(self.post_request)
        client.wait_for_response(5)
        self.assertFalse(client.connection_is_closed())
        self.assertEqual(client.last_response.status, "200", "Status code mismatch.")
        self.assertIsNotNone(client.last_response.headers.get(header[0]))
        self.assertEqual(len(client.last_response.headers.get(header[0])), len(header[1]))
        self.assertEqual(
            len(client.last_response.body), body_len, "Tempesta did not return full response body."
        )
class TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(
    TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker
):
    """
    Sticky-cookie enforcement (302 redirect, then 200 with the cookie)
    exercised with TSO/GRO/GSO offloads disabled and enabled.
    """

    tempesta = {
        "config": """
        listen 443 proto=h2;
        srv_group default {
            server ${server_ip}:8000;
        }
        vhost v_good {
            proxy_pass default;
            sticky {
                sticky_sessions;
                cookie enforce;
                secret "f00)9eR59*_/22";
            }
        }
        tls_certificate ${tempesta_workdir}/tempesta.crt;
        tls_certificate_key ${tempesta_workdir}/tempesta.key;
        tls_match_any_server_name;
        cache 1;
        cache_fulfill * *;
        block_action attack reply;
        block_action error reply;
        http_chain {
            host == "bad.com" -> block;
            host == "example.com" -> v_good;
        }
        """
    }

    def test_headers_frame_for_local_resp_sticky_cookie_short(self):
        """Short (single-frame) local responses, offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU
        )

    def test_headers_frame_for_local_resp_sticky_cookie_long(self):
        """Long (multi-frame) responses, offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU
        )

    def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client, server):
        # 1 KB header, empty body.
        self._test_headers_frame_for_local_resp_sticky_cookie(client, server, 1000, 0)

    def _test_headers_frame_for_local_resp_sticky_cookie_long(self, client, server):
        # 50 KB header, 50 KB body.
        self._test_headers_frame_for_local_resp_sticky_cookie(client, server, 50000, 50000)

    def _test_headers_frame_for_local_resp_sticky_cookie(
        self, client, server, header_len, body_len
    ):
        """First request gets a local 302 with `set-cookie`; retrying with
        that cookie must reach the backend and return 200."""
        header = ("qwerty", "x" * header_len)
        server.set_response(
            "HTTP/1.1 200 OK\r\n" + "Date: test\r\n" + "Server: debian\r\n"
            f"{header[0]}: {header[1]}\r\n"
            + f"Content-Length: {body_len}\r\n\r\n"
            + ("x" * body_len)
        )
        client.send_request(request=self.post_request, expected_status_code="302")
        # NOTE(review): mutates the shared self.post_request list and relies on
        # the pop() below to undo it; if send_request raises, the cookie header
        # leaks into later tests — consider try/finally. TODO confirm intent.
        self.post_request.append(HeaderTuple("Cookie", client.last_response.headers["set-cookie"]))
        client.send_request(request=self.post_request, expected_status_code="200")
        self.post_request.pop()
class TestH2FrameEnabledDisabledTsoGroGsoCache(TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):
    """
    Conditional-GET cache behaviour (200 vs 304 on `if-modified-since`)
    exercised with TSO/GRO/GSO offloads disabled and enabled.
    """

    tempesta = {
        "config": """
        listen 443 proto=h2;
        srv_group default {
            server ${server_ip}:8000;
        }
        vhost v_good {
            proxy_pass default;
        }
        tls_certificate ${tempesta_workdir}/tempesta.crt;
        tls_certificate_key ${tempesta_workdir}/tempesta.key;
        tls_match_any_server_name;
        cache 1;
        cache_fulfill * *;
        cache_methods GET;
        block_action attack reply;
        block_action error reply;
        http_chain {
            host == "bad.com" -> block;
            host == "example.com" -> v_good;
        }
        """
    }

    def test_headers_frame_for_local_resp_cache_304_short(self):
        """Short cached response, future date -> 304; offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU
        )

    def test_headers_frame_for_local_resp_cache_200_short(self):
        """Short cached response, past date -> 200; offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU
        )

    def test_headers_frame_for_local_resp_cache_304_long(self):
        """Long cached response, future date -> 304; offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU
        )

    def test_headers_frame_for_local_resp_cache_200_long(self):
        """Long cached response, past date -> 200; offloads off then on."""
        client, server = self.setup_tests()
        self.run_test_tso_gro_gso_disabled(
            client, server, self._test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU
        )
        self.run_test_tso_gro_gso_enabled(
            client, server, self._test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU
        )

    def _test_headers_frame_for_local_resp_cache_304_short(self, client, server):
        self._test_headers_frame_for_local_resp_cache(
            client, server, 1000, 0, "Mon, 12 Dec 2024 13:59:39 GMT", "304"
        )

    def _test_headers_frame_for_local_resp_cache_200_short(self, client, server):
        self._test_headers_frame_for_local_resp_cache(
            client, server, 1000, 0, "Mon, 12 Dec 2020 13:59:39 GMT", "200"
        )

    def _test_headers_frame_for_local_resp_cache_304_long(self, client, server):
        self._test_headers_frame_for_local_resp_cache(
            client, server, 50000, 100000, "Mon, 12 Dec 2024 13:59:39 GMT", "304"
        )

    def _test_headers_frame_for_local_resp_cache_200_long(self, client, server):
        self._test_headers_frame_for_local_resp_cache(
            client, server, 50000, 100000, "Mon, 12 Dec 2020 13:59:39 GMT", "200"
        )

    def _test_headers_frame_for_local_resp_cache(
        self, client, server, header_len, body_len, date, status_code
    ):
        """Prime the cache with a GET, then repeat it with
        `if-modified-since: date` and expect `status_code`."""
        header = ("qwerty", "x" * header_len)
        server.set_response(
            "HTTP/1.1 200 OK\r\n" + "Date: test\r\n" + "Server: debian\r\n"
            f"{header[0]}: {header[1]}\r\n"
            + f"Content-Length: {body_len}\r\n\r\n"
            + ("x" * body_len)
        )
        headers = [
            HeaderTuple(":authority", "example.com"),
            HeaderTuple(":path", "/"),
            HeaderTuple(":scheme", "https"),
            HeaderTuple(":method", "GET"),
        ]
        # First request populates the cache; second is the conditional GET.
        client.send_request(request=headers, expected_status_code="200")
        headers.append(HeaderTuple("if-modified-since", date))
        client.send_request(request=headers, expected_status_code=status_code)
|
normal
|
{
"blob_id": "e474cb3db74b5344bd861aacf779cb9f77830ef6",
"index": 5661,
"step-1": "<mask token>\n\n\nclass TestH2FrameEnabledDisabledTsoGroGso(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n\n def test_headers_frame_with_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n\n def test_headers_frame_without_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n\n def test_data_frame(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_d(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_e(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n <mask token>\n\n def _test_data_frame(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 100000)\n <mask token>\n\n def _test_headers_frame_without_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 1000, 0)\n\n def _test_headers_data_frames(self, client, server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: 
{body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.make_request(self.post_request)\n client.wait_for_response(5)\n self.assertFalse(client.connection_is_closed())\n self.assertEqual(client.last_response.status, '200',\n 'Status code mismatch.')\n self.assertIsNotNone(client.last_response.headers.get(header[0]))\n self.assertEqual(len(client.last_response.headers.get(header[0])),\n len(header[1]))\n self.assertEqual(len(client.last_response.body), body_len,\n 'Tempesta did not return full response body.')\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n sticky {\n sticky_sessions;\n cookie enforce;\n secret \"f00)9eR59*_/22\";\n }\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_sticky_cookie_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_sticky_cookie_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client,\n 
server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 1000, 0)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_long(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 50000, 50000)\n\n def _test_headers_frame_for_local_resp_sticky_cookie(self, client,\n server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.send_request(request=self.post_request, expected_status_code\n ='302')\n self.post_request.append(HeaderTuple('Cookie', client.last_response\n .headers['set-cookie']))\n client.send_request(request=self.post_request, expected_status_code\n ='200')\n self.post_request.pop()\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoCache(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n cache_methods GET;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_cache_304_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, 
self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_304_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_cache_304_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache_304_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache(self, client, server,\n header_len, body_len, date, status_code):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n 
f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n headers = [HeaderTuple(':authority', 'example.com'), HeaderTuple(\n ':path', '/'), HeaderTuple(':scheme', 'https'), HeaderTuple(\n ':method', 'GET')]\n client.send_request(request=headers, expected_status_code='200')\n headers.append(HeaderTuple('if-modified-since', date))\n client.send_request(request=headers, expected_status_code=status_code)\n",
"step-2": "<mask token>\n\n\nclass TestH2Frame(H2Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_rst_frame_in_response(self):\n \"\"\"\n When Tempesta returns RST_STREAM:\n - open streams must not be closed;\n - new streams must be accepted.\n \"\"\"\n client = self.get_client('deproxy')\n client.parsing = False\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(request=self.post_request, end_stream=False)\n stream_with_rst = 3\n client.stream_id = stream_with_rst\n client.send_request(self.get_request + [('x-forwarded-for',\n '1.1.1.1.1.1')], '400')\n client.make_request(self.get_request, end_stream=True)\n client.wait_for_response(3)\n client.stream_id = 1\n client.make_request('body', end_stream=True)\n client.wait_for_response(3)\n self.assertRaises(StreamClosedError, client.h2_connection.\n _get_stream_by_id, stream_with_rst)\n self.assertFalse(client.connection_is_closed(),\n 'Tempesta closed connection after sending RST_STREAM.')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoBase(H2Base):\n\n def setup_tests(self):\n self.start_all_services()\n client = self.get_client('deproxy')\n server = self.get_server('deproxy')\n client.update_initial_settings(header_table_size=512)\n client.send_bytes(client.h2_connection.data_to_send())\n client.wait_for_ack_settings()\n return client, server\n\n\n<mask token>\n\n\nclass TestH2FrameEnabledDisabledTsoGroGso(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n\n def test_headers_frame_with_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n\n def test_headers_frame_without_continuation(self):\n 
client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n\n def test_data_frame(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_d(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_e(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_invalid_req(self, client, server):\n client.send_request(request=[HeaderTuple(':authority', 'bad.com'),\n HeaderTuple(':path', '/'), HeaderTuple(':scheme', 'https'),\n HeaderTuple(':method', 'GET')], expected_status_code='403')\n\n def _test_data_frame(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 100000)\n\n def _test_headers_frame_with_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 0)\n\n def _test_headers_frame_without_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 1000, 0)\n\n def _test_headers_data_frames(self, client, server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.make_request(self.post_request)\n client.wait_for_response(5)\n 
self.assertFalse(client.connection_is_closed())\n self.assertEqual(client.last_response.status, '200',\n 'Status code mismatch.')\n self.assertIsNotNone(client.last_response.headers.get(header[0]))\n self.assertEqual(len(client.last_response.headers.get(header[0])),\n len(header[1]))\n self.assertEqual(len(client.last_response.body), body_len,\n 'Tempesta did not return full response body.')\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n sticky {\n sticky_sessions;\n cookie enforce;\n secret \"f00)9eR59*_/22\";\n }\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_sticky_cookie_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_sticky_cookie_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 1000, 0)\n\n def 
_test_headers_frame_for_local_resp_sticky_cookie_long(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 50000, 50000)\n\n def _test_headers_frame_for_local_resp_sticky_cookie(self, client,\n server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.send_request(request=self.post_request, expected_status_code\n ='302')\n self.post_request.append(HeaderTuple('Cookie', client.last_response\n .headers['set-cookie']))\n client.send_request(request=self.post_request, expected_status_code\n ='200')\n self.post_request.pop()\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoCache(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n cache_methods GET;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_cache_304_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n 
self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_304_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_cache_304_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache_304_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache(self, client, server,\n header_len, body_len, date, status_code):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n headers = 
[HeaderTuple(':authority', 'example.com'), HeaderTuple(\n ':path', '/'), HeaderTuple(':scheme', 'https'), HeaderTuple(\n ':method', 'GET')]\n client.send_request(request=headers, expected_status_code='200')\n headers.append(HeaderTuple('if-modified-since', date))\n client.send_request(request=headers, expected_status_code=status_code)\n",
"step-3": "<mask token>\n\n\nclass TestH2Frame(H2Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_window_update_frame(self):\n \"\"\"Tempesta must handle WindowUpdate frame.\"\"\"\n self.start_all_services(client=True)\n client: deproxy_client.DeproxyClientH2 = self.get_client('deproxy')\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n self.assertTrue(client.wait_for_ack_settings())\n client.h2_connection.increment_flow_control_window(5000)\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n client.send_request(self.get_request, '200')\n self.assertFalse(client.connection_is_closed())\n\n def test_continuation_frame(self):\n \"\"\"Tempesta must handle CONTINUATION frame.\"\"\"\n self.start_all_services()\n client: deproxy_client.DeproxyClientH2 = self.get_client('deproxy')\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n client.send_request(request=self.get_request + [('qwerty', 'x' * \n 5000) for _ in range(4)], expected_status_code='200')\n self.assertFalse(client.connection_is_closed())\n <mask token>\n\n def test_rst_frame_in_response(self):\n \"\"\"\n When Tempesta returns RST_STREAM:\n - open streams must not be closed;\n - new streams must be accepted.\n \"\"\"\n client = self.get_client('deproxy')\n client.parsing = False\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(request=self.post_request, end_stream=False)\n stream_with_rst = 3\n client.stream_id = stream_with_rst\n client.send_request(self.get_request + [('x-forwarded-for',\n '1.1.1.1.1.1')], '400')\n client.make_request(self.get_request, end_stream=True)\n client.wait_for_response(3)\n client.stream_id = 1\n client.make_request('body', end_stream=True)\n 
client.wait_for_response(3)\n self.assertRaises(StreamClosedError, client.h2_connection.\n _get_stream_by_id, stream_with_rst)\n self.assertFalse(client.connection_is_closed(),\n 'Tempesta closed connection after sending RST_STREAM.')\n <mask token>\n <mask token>\n <mask token>\n\n def test_double_header_frame_in_single_stream(self):\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(self.post_request, end_stream=False)\n client.make_request([('header1', 'header value1')], end_stream=True)\n self.assertTrue(client.wait_for_connection_close())\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n\n def __assert_test(self, client, request_body: str, request_number: int):\n server = self.get_server('deproxy')\n self.assertTrue(client.wait_for_response(timeout=5))\n self.assertEqual(client.last_response.status, '200')\n self.assertEqual(len(server.requests), request_number)\n checks.check_tempesta_request_and_response_stats(tempesta=self.\n get_tempesta(), cl_msg_received=request_number,\n cl_msg_forwarded=request_number, srv_msg_received=\n request_number, srv_msg_forwarded=request_number)\n error_msg = 'Malformed request from Tempesta.'\n self.assertEqual(server.last_request.method, self.post_request[3][1\n ], error_msg)\n self.assertEqual(server.last_request.headers['host'], self.\n post_request[0][1], error_msg)\n self.assertEqual(server.last_request.uri, self.post_request[1][1],\n error_msg)\n self.assertEqual(server.last_request.body, request_body)\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoBase(H2Base):\n\n def setup_tests(self):\n self.start_all_services()\n client = self.get_client('deproxy')\n server = self.get_server('deproxy')\n client.update_initial_settings(header_table_size=512)\n client.send_bytes(client.h2_connection.data_to_send())\n client.wait_for_ack_settings()\n return client, server\n\n\n<mask token>\n\n\nclass TestH2FrameEnabledDisabledTsoGroGso(\n 
TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n\n def test_headers_frame_with_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n\n def test_headers_frame_without_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n\n def test_data_frame(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_d(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_e(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_invalid_req(self, client, server):\n client.send_request(request=[HeaderTuple(':authority', 'bad.com'),\n HeaderTuple(':path', '/'), HeaderTuple(':scheme', 'https'),\n HeaderTuple(':method', 'GET')], expected_status_code='403')\n\n def _test_data_frame(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 100000)\n\n def _test_headers_frame_with_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 0)\n\n def _test_headers_frame_without_continuation(self, client, server):\n 
self._test_headers_data_frames(client, server, 1000, 0)\n\n def _test_headers_data_frames(self, client, server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.make_request(self.post_request)\n client.wait_for_response(5)\n self.assertFalse(client.connection_is_closed())\n self.assertEqual(client.last_response.status, '200',\n 'Status code mismatch.')\n self.assertIsNotNone(client.last_response.headers.get(header[0]))\n self.assertEqual(len(client.last_response.headers.get(header[0])),\n len(header[1]))\n self.assertEqual(len(client.last_response.body), body_len,\n 'Tempesta did not return full response body.')\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n sticky {\n sticky_sessions;\n cookie enforce;\n secret \"f00)9eR59*_/22\";\n }\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_sticky_cookie_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_sticky_cookie_long(self):\n client, server = self.setup_tests()\n 
self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 1000, 0)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_long(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 50000, 50000)\n\n def _test_headers_frame_for_local_resp_sticky_cookie(self, client,\n server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.send_request(request=self.post_request, expected_status_code\n ='302')\n self.post_request.append(HeaderTuple('Cookie', client.last_response\n .headers['set-cookie']))\n client.send_request(request=self.post_request, expected_status_code\n ='200')\n self.post_request.pop()\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoCache(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n cache_methods GET;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_cache_304_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n 
_test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_304_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_cache_304_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache_304_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 
50000,\n 100000, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache(self, client, server,\n header_len, body_len, date, status_code):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n headers = [HeaderTuple(':authority', 'example.com'), HeaderTuple(\n ':path', '/'), HeaderTuple(':scheme', 'https'), HeaderTuple(\n ':method', 'GET')]\n client.send_request(request=headers, expected_status_code='200')\n headers.append(HeaderTuple('if-modified-since', date))\n client.send_request(request=headers, expected_status_code=status_code)\n",
"step-4": "<mask token>\n__author__ = 'Tempesta Technologies, Inc.'\n__copyright__ = 'Copyright (C) 2023 Tempesta Technologies, Inc.'\n__license__ = 'GPL2'\n<mask token>\n\n\nclass TestH2Frame(H2Base):\n\n def test_data_framing(self):\n \"\"\"Send many 1 byte frames in request.\"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client('deproxy')\n deproxy_cl.parsing = False\n request_body = 'x' * 100\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n self.__assert_test(client=deproxy_cl, request_body=request_body,\n request_number=1)\n\n def test_empty_last_data_frame(self):\n \"\"\"\n Send request with empty last data frame. It is valid request. RFC 9113 6.9.1.\n \"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client('deproxy')\n deproxy_cl.parsing = False\n request_body = '123'\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=False)\n deproxy_cl.make_request(request='', end_stream=True)\n self.__assert_test(client=deproxy_cl, request_body=request_body,\n request_number=1)\n\n def test_empty_data_frame(self):\n \"\"\"\n Send request with empty data frame. It is valid request. 
RFC 9113 10.5.\n \"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client('deproxy')\n deproxy_cl.parsing = False\n request_body = '123'\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request='', end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=True)\n self.__assert_test(client=deproxy_cl, request_body=request_body,\n request_number=1)\n\n def test_settings_frame(self):\n \"\"\"\n Create tls connection and send preamble + correct settings frame.\n Tempesta must accept settings and return settings + ack settings frames.\n Then client send ack settings frame and Tempesta must correctly accept it.\n \"\"\"\n self.start_all_services(client=True)\n client: deproxy_client.DeproxyClientH2 = self.get_client('deproxy')\n self.initiate_h2_connection(client)\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n client.send_request(self.post_request, '200')\n\n def test_window_update_frame(self):\n \"\"\"Tempesta must handle WindowUpdate frame.\"\"\"\n self.start_all_services(client=True)\n client: deproxy_client.DeproxyClientH2 = self.get_client('deproxy')\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n self.assertTrue(client.wait_for_ack_settings())\n client.h2_connection.increment_flow_control_window(5000)\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n client.send_request(self.get_request, '200')\n self.assertFalse(client.connection_is_closed())\n\n def test_continuation_frame(self):\n \"\"\"Tempesta must handle CONTINUATION frame.\"\"\"\n self.start_all_services()\n client: deproxy_client.DeproxyClientH2 = self.get_client('deproxy')\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n 
client.h2_connection.clear_outbound_data_buffer()\n client.send_request(request=self.get_request + [('qwerty', 'x' * \n 5000) for _ in range(4)], expected_status_code='200')\n self.assertFalse(client.connection_is_closed())\n\n def test_rst_frame_in_request(self):\n \"\"\"\n Tempesta must handle RST_STREAM frame and close stream but other streams MUST work.\n \"\"\"\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(request=self.post_request, end_stream=False)\n client.stream_id = 3\n client.make_request(request=self.post_request, end_stream=False)\n client.h2_connection.reset_stream(stream_id=1, error_code=0)\n client.send_bytes(client.h2_connection.data_to_send())\n client.send_request('qwe', '200')\n client.stream_id = 5\n client.send_request(self.post_request, '200')\n self.assertFalse(client.connection_is_closed(),\n 'Tempesta closed connection after receiving RST_STREAM.')\n\n def test_rst_frame_in_response(self):\n \"\"\"\n When Tempesta returns RST_STREAM:\n - open streams must not be closed;\n - new streams must be accepted.\n \"\"\"\n client = self.get_client('deproxy')\n client.parsing = False\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(request=self.post_request, end_stream=False)\n stream_with_rst = 3\n client.stream_id = stream_with_rst\n client.send_request(self.get_request + [('x-forwarded-for',\n '1.1.1.1.1.1')], '400')\n client.make_request(self.get_request, end_stream=True)\n client.wait_for_response(3)\n client.stream_id = 1\n client.make_request('body', end_stream=True)\n client.wait_for_response(3)\n self.assertRaises(StreamClosedError, client.h2_connection.\n _get_stream_by_id, stream_with_rst)\n self.assertFalse(client.connection_is_closed(),\n 'Tempesta closed connection after sending RST_STREAM.')\n\n def test_rst_stream_with_id_0(self):\n \"\"\"\n RST_STREAM frames MUST be associated with a stream. 
If a RST_STREAM frame\n is received with a stream identifier of 0x00, the recipient MUST treat this\n as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.\n RFC 9113 6.4\n \"\"\"\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.send_bytes(\n b'\\x00\\x00\\x04\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00')\n self.assertTrue(client.wait_for_connection_close(1),\n 'Tempesta did not close connection after receiving RST_STREAM with id 0.'\n )\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n\n def test_goaway_frame_in_response(self):\n \"\"\"\n Tempesta must:\n - close all streams for connection error (GOAWAY);\n - return last_stream_id.\n\n There is an inherent race condition between an endpoint starting new streams\n and the remote peer sending a GOAWAY frame. To deal with this case, the GOAWAY\n contains the stream identifier of the last peer-initiated stream that was or\n might be processed on the sending endpoint in this connection. 
For instance,\n if the server sends a GOAWAY frame, the identified stream is the highest-numbered\n stream initiated by the client.\n RFC 9113 6.8\n \"\"\"\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=self.post_request, end_stream=False)\n client.send_bytes(b'\\x00\\x00\\x03\\x00\\x01\\x00\\x00\\x00\\x00asd')\n self.assertTrue(client.wait_for_connection_close(3),\n 'Tempesta did not send GOAWAY frame.')\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n self.assertEqual(client.last_stream_id, stream_id,\n 'Tempesta returned invalid last_stream_id in GOAWAY frame.')\n\n def test_goaway_frame_in_request(self):\n \"\"\"\n Tempesta must not close connection after receiving GOAWAY frame.\n\n GOAWAY allows an endpoint to gracefully stop accepting new streams while still\n finishing processing of previously established streams.\n RFC 9113 6.8\n \"\"\"\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=self.post_request, end_stream=False)\n client.send_bytes(\n b'\\x00\\x00\\x08\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01'\n )\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request='asd', end_stream=True)\n self.assertTrue(client.wait_for_response(),\n 'Tempesta closed connection after receiving GOAWAY frame.')\n\n def test_double_header_frame_in_single_stream(self):\n client = self.get_client('deproxy')\n self.start_all_services()\n self.initiate_h2_connection(client)\n client.make_request(self.post_request, end_stream=False)\n client.make_request([('header1', 'header value1')], end_stream=True)\n self.assertTrue(client.wait_for_connection_close())\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, 
client.error_codes)\n\n def __assert_test(self, client, request_body: str, request_number: int):\n server = self.get_server('deproxy')\n self.assertTrue(client.wait_for_response(timeout=5))\n self.assertEqual(client.last_response.status, '200')\n self.assertEqual(len(server.requests), request_number)\n checks.check_tempesta_request_and_response_stats(tempesta=self.\n get_tempesta(), cl_msg_received=request_number,\n cl_msg_forwarded=request_number, srv_msg_received=\n request_number, srv_msg_forwarded=request_number)\n error_msg = 'Malformed request from Tempesta.'\n self.assertEqual(server.last_request.method, self.post_request[3][1\n ], error_msg)\n self.assertEqual(server.last_request.headers['host'], self.\n post_request[0][1], error_msg)\n self.assertEqual(server.last_request.uri, self.post_request[1][1],\n error_msg)\n self.assertEqual(server.last_request.body, request_body)\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoBase(H2Base):\n\n def setup_tests(self):\n self.start_all_services()\n client = self.get_client('deproxy')\n server = self.get_server('deproxy')\n client.update_initial_settings(header_table_size=512)\n client.send_bytes(client.h2_connection.data_to_send())\n client.wait_for_ack_settings()\n return client, server\n\n\nDEFAULT_MTU = 1500\n\n\nclass TestH2FrameEnabledDisabledTsoGroGso(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n\n def test_headers_frame_with_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_with_continuation, DEFAULT_MTU)\n\n def test_headers_frame_without_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_without_continuation, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n 
_test_headers_frame_without_continuation, DEFAULT_MTU)\n\n def test_data_frame(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_data_frame, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_d(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_e(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_invalid_req(self, client, server):\n client.send_request(request=[HeaderTuple(':authority', 'bad.com'),\n HeaderTuple(':path', '/'), HeaderTuple(':scheme', 'https'),\n HeaderTuple(':method', 'GET')], expected_status_code='403')\n\n def _test_data_frame(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 100000)\n\n def _test_headers_frame_with_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 0)\n\n def _test_headers_frame_without_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 1000, 0)\n\n def _test_headers_data_frames(self, client, server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.make_request(self.post_request)\n client.wait_for_response(5)\n self.assertFalse(client.connection_is_closed())\n self.assertEqual(client.last_response.status, '200',\n 'Status code mismatch.')\n self.assertIsNotNone(client.last_response.headers.get(header[0]))\n 
self.assertEqual(len(client.last_response.headers.get(header[0])),\n len(header[1]))\n self.assertEqual(len(client.last_response.body), body_len,\n 'Tempesta did not return full response body.')\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n sticky {\n sticky_sessions;\n cookie enforce;\n secret \"f00)9eR59*_/22\";\n }\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_sticky_cookie_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_sticky_cookie_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 1000, 0)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_long(self, client,\n server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client,\n server, 50000, 50000)\n\n def 
_test_headers_frame_for_local_resp_sticky_cookie(self, client,\n server, header_len, body_len):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n client.send_request(request=self.post_request, expected_status_code\n ='302')\n self.post_request.append(HeaderTuple('Cookie', client.last_response\n .headers['set-cookie']))\n client.send_request(request=self.post_request, expected_status_code\n ='200')\n self.post_request.pop()\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoCache(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {'config':\n \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n cache_methods GET;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_cache_304_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_304_long(self):\n client, 
server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_cache_200_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self.\n _test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU)\n\n def _test_headers_frame_for_local_resp_cache_304_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_short(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 1000,\n 0, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache_304_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2024 13:59:39 GMT', '304')\n\n def _test_headers_frame_for_local_resp_cache_200_long(self, client, server\n ):\n self._test_headers_frame_for_local_resp_cache(client, server, 50000,\n 100000, 'Mon, 12 Dec 2020 13:59:39 GMT', '200')\n\n def _test_headers_frame_for_local_resp_cache(self, client, server,\n header_len, body_len, date, status_code):\n header = 'qwerty', 'x' * header_len\n server.set_response('HTTP/1.1 200 OK\\r\\n' + 'Date: test\\r\\n' +\n f'Server: debian\\r\\n{header[0]}: {header[1]}\\r\\n' +\n f'Content-Length: {body_len}\\r\\n\\r\\n' + 'x' * body_len)\n headers = [HeaderTuple(':authority', 'example.com'), HeaderTuple(\n ':path', '/'), HeaderTuple(':scheme', 'https'), HeaderTuple(\n ':method', 'GET')]\n client.send_request(request=headers, 
expected_status_code='200')\n headers.append(HeaderTuple('if-modified-since', date))\n client.send_request(request=headers, expected_status_code=status_code)\n",
"step-5": "\"\"\"Functional tests for h2 frames.\"\"\"\n\n__author__ = \"Tempesta Technologies, Inc.\"\n__copyright__ = \"Copyright (C) 2023 Tempesta Technologies, Inc.\"\n__license__ = \"GPL2\"\n\nfrom h2.errors import ErrorCodes\nfrom h2.exceptions import StreamClosedError\n\nfrom framework import deproxy_client, tester\nfrom helpers import checks_for_tests as checks\nfrom http2_general.helpers import H2Base\nfrom helpers.networker import NetWorker\nfrom hpack import HeaderTuple\n\n\nclass TestH2Frame(H2Base):\n def test_data_framing(self):\n \"\"\"Send many 1 byte frames in request.\"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)\n\n def test_empty_last_data_frame(self):\n \"\"\"\n Send request with empty last data frame. It is valid request. RFC 9113 6.9.1.\n \"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"123\"\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=False)\n deproxy_cl.make_request(request=\"\", end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)\n\n def test_empty_data_frame(self):\n \"\"\"\n Send request with empty data frame. It is valid request. 
RFC 9113 10.5.\n \"\"\"\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"123\"\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n deproxy_cl.make_request(request=\"\", end_stream=False)\n deproxy_cl.make_request(request=request_body, end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)\n\n def test_settings_frame(self):\n \"\"\"\n Create tls connection and send preamble + correct settings frame.\n Tempesta must accept settings and return settings + ack settings frames.\n Then client send ack settings frame and Tempesta must correctly accept it.\n \"\"\"\n self.start_all_services(client=True)\n\n client: deproxy_client.DeproxyClientH2 = self.get_client(\"deproxy\")\n\n # initiate_connection() generates preamble + settings frame with default variables\n self.initiate_h2_connection(client)\n\n # send empty setting frame with ack flag.\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n\n # send header frame after exchanging settings and make sure\n # that connection is open.\n client.send_request(self.post_request, \"200\")\n\n def test_window_update_frame(self):\n \"\"\"Tempesta must handle WindowUpdate frame.\"\"\"\n self.start_all_services(client=True)\n\n client: deproxy_client.DeproxyClientH2 = self.get_client(\"deproxy\")\n\n # add preamble + settings frame with SETTING_INITIAL_WINDOW_SIZE = 65535\n client.update_initial_settings()\n\n # send preamble + settings frame\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n self.assertTrue(client.wait_for_ack_settings())\n\n # send WindowUpdate frame with window size increment = 5000\n client.h2_connection.increment_flow_control_window(5000)\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n\n # 
send header frame after sending WindowUpdate and make sure\n # that connection is working correctly.\n client.send_request(self.get_request, \"200\")\n self.assertFalse(client.connection_is_closed())\n\n def test_continuation_frame(self):\n \"\"\"Tempesta must handle CONTINUATION frame.\"\"\"\n self.start_all_services()\n\n client: deproxy_client.DeproxyClientH2 = self.get_client(\"deproxy\")\n\n client.update_initial_settings()\n client.send_bytes(client.h2_connection.data_to_send())\n client.h2_connection.clear_outbound_data_buffer()\n\n # H2Connection separates headers to HEADERS + CONTINUATION frames\n # if they are larger than 16384 bytes\n client.send_request(\n request=self.get_request + [(\"qwerty\", \"x\" * 5000) for _ in range(4)],\n expected_status_code=\"200\",\n )\n\n self.assertFalse(client.connection_is_closed())\n\n def test_rst_frame_in_request(self):\n \"\"\"\n Tempesta must handle RST_STREAM frame and close stream but other streams MUST work.\n \"\"\"\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # client opens streams with id 1, 3 and does not close them\n client.make_request(request=self.post_request, end_stream=False)\n client.stream_id = 3\n client.make_request(request=self.post_request, end_stream=False)\n\n # client send RST_STREAM frame with NO_ERROR code in stream 1 and\n # Tempesta closes it for itself.\n client.h2_connection.reset_stream(stream_id=1, error_code=0)\n client.send_bytes(client.h2_connection.data_to_send())\n\n # Client send DATA frame in stream 3 and it MUST receive response\n client.send_request(\"qwe\", \"200\")\n\n # Tempesta allows creating new streams.\n client.stream_id = 5\n client.send_request(self.post_request, \"200\")\n\n self.assertFalse(\n client.connection_is_closed(), \"Tempesta closed connection after receiving RST_STREAM.\"\n )\n\n def test_rst_frame_in_response(self):\n \"\"\"\n When Tempesta returns RST_STREAM:\n - open streams must not be 
closed;\n - new streams must be accepted.\n \"\"\"\n client = self.get_client(\"deproxy\")\n client.parsing = False\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # client opens stream with id 1 and does not close it\n client.make_request(request=self.post_request, end_stream=False)\n\n # client send invalid request and Tempesta returns RST_STREAM\n stream_with_rst = 3\n client.stream_id = stream_with_rst\n client.send_request(self.get_request + [(\"x-forwarded-for\", \"1.1.1.1.1.1\")], \"400\")\n\n # client open new stream\n client.make_request(self.get_request, end_stream=True)\n client.wait_for_response(3)\n\n # client send DATA frame in stream 1 and it must be open.\n client.stream_id = 1\n client.make_request(\"body\", end_stream=True)\n client.wait_for_response(3)\n\n self.assertRaises(\n StreamClosedError, client.h2_connection._get_stream_by_id, stream_with_rst\n )\n self.assertFalse(\n client.connection_is_closed(), \"Tempesta closed connection after sending RST_STREAM.\"\n )\n\n def test_rst_stream_with_id_0(self):\n \"\"\"\n RST_STREAM frames MUST be associated with a stream. 
If a RST_STREAM frame\n is received with a stream identifier of 0x00, the recipient MUST treat this\n as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.\n RFC 9113 6.4\n \"\"\"\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # send RST_STREAM with id 0\n client.send_bytes(b\"\\x00\\x00\\x04\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\")\n\n self.assertTrue(\n client.wait_for_connection_close(1),\n \"Tempesta did not close connection after receiving RST_STREAM with id 0.\",\n )\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n\n def test_goaway_frame_in_response(self):\n \"\"\"\n Tempesta must:\n - close all streams for connection error (GOAWAY);\n - return last_stream_id.\n\n There is an inherent race condition between an endpoint starting new streams\n and the remote peer sending a GOAWAY frame. To deal with this case, the GOAWAY\n contains the stream identifier of the last peer-initiated stream that was or\n might be processed on the sending endpoint in this connection. 
For instance,\n if the server sends a GOAWAY frame, the identified stream is the highest-numbered\n stream initiated by the client.\n RFC 9113 6.8\n \"\"\"\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # Client opens many streams and does not close them\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=self.post_request, end_stream=False)\n\n # Client send DATA frame with stream id 0.\n # Tempesta MUST return GOAWAY frame with PROTOCOL_ERROR\n client.send_bytes(b\"\\x00\\x00\\x03\\x00\\x01\\x00\\x00\\x00\\x00asd\")\n\n self.assertTrue(client.wait_for_connection_close(3), \"Tempesta did not send GOAWAY frame.\")\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n self.assertEqual(\n client.last_stream_id,\n stream_id,\n \"Tempesta returned invalid last_stream_id in GOAWAY frame.\",\n )\n\n def test_goaway_frame_in_request(self):\n \"\"\"\n Tempesta must not close connection after receiving GOAWAY frame.\n\n GOAWAY allows an endpoint to gracefully stop accepting new streams while still\n finishing processing of previously established streams.\n RFC 9113 6.8\n \"\"\"\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n # Client opens many streams and does not close them\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n client.make_request(request=self.post_request, end_stream=False)\n\n # Client send GOAWAY frame with PROTOCOL_ERROR as bytes\n # because `_terminate_connection` method changes state machine to closed\n client.send_bytes(b\"\\x00\\x00\\x08\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x01\")\n\n # Client sends frames in already open streams.\n # Tempesta must handle these frames and must not close streams,\n # because sender closes connection, but not receiver.\n for stream_id in range(1, 6, 2):\n client.stream_id = stream_id\n 
client.make_request(request=\"asd\", end_stream=True)\n\n self.assertTrue(\n client.wait_for_response(), \"Tempesta closed connection after receiving GOAWAY frame.\"\n )\n\n def test_double_header_frame_in_single_stream(self):\n client = self.get_client(\"deproxy\")\n\n self.start_all_services()\n self.initiate_h2_connection(client)\n\n client.make_request(self.post_request, end_stream=False)\n client.make_request([(\"header1\", \"header value1\")], end_stream=True)\n\n self.assertTrue(client.wait_for_connection_close())\n self.assertIn(ErrorCodes.PROTOCOL_ERROR, client.error_codes)\n\n def __assert_test(self, client, request_body: str, request_number: int):\n server = self.get_server(\"deproxy\")\n\n self.assertTrue(client.wait_for_response(timeout=5))\n self.assertEqual(client.last_response.status, \"200\")\n self.assertEqual(len(server.requests), request_number)\n checks.check_tempesta_request_and_response_stats(\n tempesta=self.get_tempesta(),\n cl_msg_received=request_number,\n cl_msg_forwarded=request_number,\n srv_msg_received=request_number,\n srv_msg_forwarded=request_number,\n )\n error_msg = \"Malformed request from Tempesta.\"\n self.assertEqual(server.last_request.method, self.post_request[3][1], error_msg)\n self.assertEqual(server.last_request.headers[\"host\"], self.post_request[0][1], error_msg)\n self.assertEqual(server.last_request.uri, self.post_request[1][1], error_msg)\n self.assertEqual(server.last_request.body, request_body)\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoBase(H2Base):\n def setup_tests(self):\n self.start_all_services()\n client = self.get_client(\"deproxy\")\n server = self.get_server(\"deproxy\")\n\n client.update_initial_settings(header_table_size=512)\n client.send_bytes(client.h2_connection.data_to_send())\n client.wait_for_ack_settings()\n\n return client, server\n\n\nDEFAULT_MTU = 1500\n\n\nclass TestH2FrameEnabledDisabledTsoGroGso(TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n def 
test_headers_frame_with_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_with_continuation, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_with_continuation, DEFAULT_MTU\n )\n\n def test_headers_frame_without_continuation(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_without_continuation, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_without_continuation, DEFAULT_MTU\n )\n\n def test_data_frame(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(client, server, self._test_data_frame, DEFAULT_MTU)\n self.run_test_tso_gro_gso_enabled(client, server, self._test_data_frame, DEFAULT_MTU)\n\n def test_headers_frame_for_local_resp_invalid_req_d(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_invalid_req_e(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_invalid_req, DEFAULT_MTU\n )\n\n def _test_headers_frame_for_local_resp_invalid_req(self, client, server):\n client.send_request(\n request=[\n HeaderTuple(\":authority\", \"bad.com\"),\n HeaderTuple(\":path\", \"/\"),\n HeaderTuple(\":scheme\", \"https\"),\n HeaderTuple(\":method\", \"GET\"),\n ],\n expected_status_code=\"403\",\n )\n\n def _test_data_frame(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 100000)\n\n def _test_headers_frame_with_continuation(self, client, server):\n self._test_headers_data_frames(client, server, 50000, 0)\n\n def _test_headers_frame_without_continuation(self, client, server):\n self._test_headers_data_frames(client, 
server, 1000, 0)\n\n def _test_headers_data_frames(self, client, server, header_len, body_len):\n header = (\"qwerty\", \"x\" * header_len)\n server.set_response(\n \"HTTP/1.1 200 OK\\r\\n\" + \"Date: test\\r\\n\" + \"Server: debian\\r\\n\"\n f\"{header[0]}: {header[1]}\\r\\n\"\n + f\"Content-Length: {body_len}\\r\\n\\r\\n\"\n + (\"x\" * body_len)\n )\n\n client.make_request(self.post_request)\n client.wait_for_response(5)\n\n self.assertFalse(client.connection_is_closed())\n self.assertEqual(client.last_response.status, \"200\", \"Status code mismatch.\")\n self.assertIsNotNone(client.last_response.headers.get(header[0]))\n self.assertEqual(len(client.last_response.headers.get(header[0])), len(header[1]))\n self.assertEqual(\n len(client.last_response.body), body_len, \"Tempesta did not return full response body.\"\n )\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoStickyCookie(\n TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker\n):\n tempesta = {\n \"config\": \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n sticky {\n sticky_sessions;\n cookie enforce;\n secret \"f00)9eR59*_/22\";\n }\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_sticky_cookie_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_sticky_cookie_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_sticky_cookie_long(self):\n client, server = self.setup_tests()\n 
self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_sticky_cookie_long, DEFAULT_MTU\n )\n\n def _test_headers_frame_for_local_resp_sticky_cookie_short(self, client, server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client, server, 1000, 0)\n\n def _test_headers_frame_for_local_resp_sticky_cookie_long(self, client, server):\n self._test_headers_frame_for_local_resp_sticky_cookie(client, server, 50000, 50000)\n\n def _test_headers_frame_for_local_resp_sticky_cookie(\n self, client, server, header_len, body_len\n ):\n header = (\"qwerty\", \"x\" * header_len)\n server.set_response(\n \"HTTP/1.1 200 OK\\r\\n\" + \"Date: test\\r\\n\" + \"Server: debian\\r\\n\"\n f\"{header[0]}: {header[1]}\\r\\n\"\n + f\"Content-Length: {body_len}\\r\\n\\r\\n\"\n + (\"x\" * body_len)\n )\n\n client.send_request(request=self.post_request, expected_status_code=\"302\")\n self.post_request.append(HeaderTuple(\"Cookie\", client.last_response.headers[\"set-cookie\"]))\n client.send_request(request=self.post_request, expected_status_code=\"200\")\n self.post_request.pop()\n\n\nclass TestH2FrameEnabledDisabledTsoGroGsoCache(TestH2FrameEnabledDisabledTsoGroGsoBase, NetWorker):\n tempesta = {\n \"config\": \"\"\"\n listen 443 proto=h2;\n srv_group default {\n server ${server_ip}:8000;\n }\n vhost v_good {\n proxy_pass default;\n }\n tls_certificate ${tempesta_workdir}/tempesta.crt;\n tls_certificate_key ${tempesta_workdir}/tempesta.key;\n tls_match_any_server_name;\n cache 1;\n cache_fulfill * *;\n cache_methods GET;\n block_action attack reply;\n block_action error reply;\n http_chain {\n host == \"bad.com\" -> block;\n host == \"example.com\" -> v_good;\n }\n \"\"\"\n }\n\n def test_headers_frame_for_local_resp_cache_304_short(self):\n client, server = self.setup_tests()\n 
self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_cache_304_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_cache_200_short(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_cache_200_short, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_cache_304_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_cache_304_long, DEFAULT_MTU\n )\n\n def test_headers_frame_for_local_resp_cache_200_long(self):\n client, server = self.setup_tests()\n self.run_test_tso_gro_gso_disabled(\n client, server, self._test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU\n )\n self.run_test_tso_gro_gso_enabled(\n client, server, self._test_headers_frame_for_local_resp_cache_200_long, DEFAULT_MTU\n )\n\n def _test_headers_frame_for_local_resp_cache_304_short(self, client, server):\n self._test_headers_frame_for_local_resp_cache(\n client, server, 1000, 0, \"Mon, 12 Dec 2024 13:59:39 GMT\", \"304\"\n )\n\n def _test_headers_frame_for_local_resp_cache_200_short(self, client, server):\n self._test_headers_frame_for_local_resp_cache(\n client, server, 1000, 0, \"Mon, 12 Dec 2020 13:59:39 GMT\", \"200\"\n )\n\n def _test_headers_frame_for_local_resp_cache_304_long(self, client, server):\n self._test_headers_frame_for_local_resp_cache(\n client, server, 50000, 100000, \"Mon, 12 Dec 2024 13:59:39 GMT\", \"304\"\n )\n\n def 
_test_headers_frame_for_local_resp_cache_200_long(self, client, server):\n self._test_headers_frame_for_local_resp_cache(\n client, server, 50000, 100000, \"Mon, 12 Dec 2020 13:59:39 GMT\", \"200\"\n )\n\n def _test_headers_frame_for_local_resp_cache(\n self, client, server, header_len, body_len, date, status_code\n ):\n header = (\"qwerty\", \"x\" * header_len)\n server.set_response(\n \"HTTP/1.1 200 OK\\r\\n\" + \"Date: test\\r\\n\" + \"Server: debian\\r\\n\"\n f\"{header[0]}: {header[1]}\\r\\n\"\n + f\"Content-Length: {body_len}\\r\\n\\r\\n\"\n + (\"x\" * body_len)\n )\n\n headers = [\n HeaderTuple(\":authority\", \"example.com\"),\n HeaderTuple(\":path\", \"/\"),\n HeaderTuple(\":scheme\", \"https\"),\n HeaderTuple(\":method\", \"GET\"),\n ]\n\n client.send_request(request=headers, expected_status_code=\"200\")\n\n headers.append(HeaderTuple(\"if-modified-since\", date))\n client.send_request(request=headers, expected_status_code=status_code)\n",
"step-ids": [
27,
33,
37,
46,
48
]
}
|
[
27,
33,
37,
46,
48
] |
#!/usr/bin/env python3
def nth_prime(n):
    """Return the n-th prime number, 1-indexed (nth_prime(1) == 2).

    Trial division against all previously found primes; fine for small n.
    """
    candidate = 2
    primes_found = []
    for _ in range(n):
        # Advance until candidate is divisible by none of the known primes.
        while any(candidate % p == 0 for p in primes_found):
            candidate += 1
        primes_found.append(candidate)
    return candidate
# CLI entry point: prompt for a 1-based index and print that prime.
if __name__ == "__main__":
    n = int(input("Which one? "))
    print(nth_prime(n))
|
normal
|
{
"blob_id": "21fb9622add4d19b2914118e3afd3867b2368a50",
"index": 4913,
"step-1": "<mask token>\n",
"step-2": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\n<mask token>\n",
"step-3": "def nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans % x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\n\nif __name__ == '__main__':\n n = int(input('Which one? '))\n print(nth_prime(n))\n",
"step-4": "#/usr/bin/env python3\n\ndef nth_prime(n):\n ans = 2\n known = []\n for _ in range(n):\n while not all(ans%x != 0 for x in known):\n ans += 1\n known.append(ans)\n return ans\n\nif __name__ == \"__main__\":\n n = int(input(\"Which one? \"))\n print(nth_prime(n))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(b - 1):
if (i + 1) * a % b == c:
frag = 'YES'
break
print(frag)
<|reserved_special_token_1|>
# Read three integers a, b, c from one line of stdin.
a, b, c = map(int, input().split())
# Answer whether some multiple k*a (k = 1 .. b-1) leaves remainder c mod b.
frag = 'NO'
for i in range(b - 1):
    if (i + 1) * a % b == c:
        frag = 'YES'
        break
print(frag)
|
flexible
|
{
"blob_id": "6ad36f2b115c822a50a38e88a8d7d524fc5b045b",
"index": 195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(b - 1):\n if (i + 1) * a % b == c:\n frag = 'YES'\n break\nprint(frag)\n",
"step-3": "a, b, c = map(int, input().split())\nfrag = 'NO'\nfor i in range(b - 1):\n if (i + 1) * a % b == c:\n frag = 'YES'\n break\nprint(frag)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def compute_reward(score_batch, input_lengths, output, sentences_batch,
reference_batch, device, sentence_lengths_batch, number_of_sample=5,
lamb=0.1):
reward_batch = []
rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
for i_data in range(len(input_lengths)):
doc_length = input_lengths[i_data]
scores = score_batch[i_data, :doc_length]
sentence_lengths = sentence_lengths_batch[i_data]
sentence_embed = output[:doc_length, i_data, :]
sentences = sentences_batch[i_data]
reference = reference_batch[i_data]
result, prob, selected = greedy_nommr(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_greedy = get_rouge_single(result, reference)
result, prob, selected = greedy_max(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_hi = get_rouge_single(result, reference)
final_choice = selected
reward_batch.append(reward_hi - reward_greedy)
rl_label_batch[final_choice, i_data, :] = 1
reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
rl_label_batch = rl_label_batch.to(device)
reward_batch.requires_grad_(False)
return reward_batch, rl_label_batch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def greedy_max(doc_length, px, sentence_embed, sentences, device,
sentence_lengths, length_limit=200, lamb=0.2):
"""
prob: sum should be 1
sentence embed: [doc_length, embed_dim]
"""
x = list(range(doc_length))
px = px.cpu().numpy()
score = px
prob = 1
summary_representation = []
bias = np.ones(px.shape)
selected = []
wc = 0
lengths = []
summary = []
while wc <= length_limit:
sample = np.argmax(score)
selected.append(sample)
wc += sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
summary_representation.append(sentence_embed[sample])
s = torch.stack(summary_representation, 1).unsqueeze(0)
all_sent = sentence_embed[:doc_length, :].unsqueeze(2)
redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0
].cpu().numpy()
score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias
for i_sel in selected:
score[i_sel] = 0
summary = '\n'.join(summary)
return summary, prob, selected
<|reserved_special_token_0|>
def compute_reward(score_batch, input_lengths, output, sentences_batch,
reference_batch, device, sentence_lengths_batch, number_of_sample=5,
lamb=0.1):
reward_batch = []
rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
for i_data in range(len(input_lengths)):
doc_length = input_lengths[i_data]
scores = score_batch[i_data, :doc_length]
sentence_lengths = sentence_lengths_batch[i_data]
sentence_embed = output[:doc_length, i_data, :]
sentences = sentences_batch[i_data]
reference = reference_batch[i_data]
result, prob, selected = greedy_nommr(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_greedy = get_rouge_single(result, reference)
result, prob, selected = greedy_max(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_hi = get_rouge_single(result, reference)
final_choice = selected
reward_batch.append(reward_hi - reward_greedy)
rl_label_batch[final_choice, i_data, :] = 1
reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
rl_label_batch = rl_label_batch.to(device)
reward_batch.requires_grad_(False)
return reward_batch, rl_label_batch
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def greedy_max(doc_length, px, sentence_embed, sentences, device,
sentence_lengths, length_limit=200, lamb=0.2):
"""
prob: sum should be 1
sentence embed: [doc_length, embed_dim]
"""
x = list(range(doc_length))
px = px.cpu().numpy()
score = px
prob = 1
summary_representation = []
bias = np.ones(px.shape)
selected = []
wc = 0
lengths = []
summary = []
while wc <= length_limit:
sample = np.argmax(score)
selected.append(sample)
wc += sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
summary_representation.append(sentence_embed[sample])
s = torch.stack(summary_representation, 1).unsqueeze(0)
all_sent = sentence_embed[:doc_length, :].unsqueeze(2)
redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0
].cpu().numpy()
score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias
for i_sel in selected:
score[i_sel] = 0
summary = '\n'.join(summary)
return summary, prob, selected
def greedy_nommr(doc_length, px, sentence_embed, sentences, device,
sentence_lengths, length_limit=200, lamb=0.2):
"""
prob: sum should be 1
sentence embed: [doc_length, embed_dim]
"""
x = list(range(doc_length))
px = px.cpu().numpy()
score = px
prob = 1
bias = np.ones(px.shape)
summary_representation = []
selected = []
wc = 0
lengths = []
summary = []
while wc <= length_limit:
sample = np.argmax(score)
selected.append(sample)
wc += sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
for i_sel in selected:
score[i_sel] = 0
summary = '\n'.join(summary)
return summary, prob, selected
def compute_reward(score_batch, input_lengths, output, sentences_batch,
reference_batch, device, sentence_lengths_batch, number_of_sample=5,
lamb=0.1):
reward_batch = []
rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
for i_data in range(len(input_lengths)):
doc_length = input_lengths[i_data]
scores = score_batch[i_data, :doc_length]
sentence_lengths = sentence_lengths_batch[i_data]
sentence_embed = output[:doc_length, i_data, :]
sentences = sentences_batch[i_data]
reference = reference_batch[i_data]
result, prob, selected = greedy_nommr(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_greedy = get_rouge_single(result, reference)
result, prob, selected = greedy_max(doc_length, scores,
sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
reward_hi = get_rouge_single(result, reference)
final_choice = selected
reward_batch.append(reward_hi - reward_greedy)
rl_label_batch[final_choice, i_data, :] = 1
reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
rl_label_batch = rl_label_batch.to(device)
reward_batch.requires_grad_(False)
return reward_batch, rl_label_batch
<|reserved_special_token_1|>
from scipy.stats import rv_discrete
import torch
import torch.nn.functional as F
import numpy as np
from utils import *
def greedy_max(doc_length, px, sentence_embed, sentences, device,
    sentence_lengths, length_limit=200, lamb=0.2):
    """Greedily build an extractive summary with an MMR-style redundancy penalty.

    Repeatedly picks the highest-scoring sentence, then re-scores the rest as
    ``lamb * relevance - (1 - lamb) * max-cosine-similarity-to-summary`` (plus a
    constant bias), until the accumulated sentence lengths exceed length_limit.

    Args:
        doc_length: number of sentences in the document.
        px: per-sentence relevance scores; a torch tensor (moved to numpy below).
        sentence_embed: sentence embeddings, indexed [sentence, embed_dim].
        sentences: list of sentence strings.
        device: unused here (kept for signature parity with callers).
        sentence_lengths: per-sentence length used for the word-count budget.
        length_limit: stop once the selected lengths sum past this.
        lamb: relevance/redundancy trade-off weight.

    Returns:
        (summary, prob, selected): the '\\n'-joined summary string, the constant
        1 (prob is never updated), and the list of selected sentence indices.
    """
    x = list(range(doc_length))  # unused
    px = px.cpu().numpy()
    score = px  # aliases px until the first reassignment inside the loop
    prob = 1  # kept constant; returned for interface compatibility
    summary_representation = []
    bias = np.ones(px.shape)
    selected = []
    wc = 0  # running length ("word count") of the selected sentences
    lengths = []  # unused beyond appends
    summary = []
    # NOTE(review): once every candidate's score is <= 0, np.argmax can return
    # an already-selected index, so duplicates are possible near the limit.
    while wc <= length_limit:
        sample = np.argmax(score)
        selected.append(sample)
        wc += sentence_lengths[sample]
        lengths.append(sentence_lengths[sample])
        summary.append(sentences[sample])
        summary_representation.append(sentence_embed[sample])
        # s: [1, embed_dim, n_selected]; all_sent: [doc_length, embed_dim, 1].
        s = torch.stack(summary_representation, 1).unsqueeze(0)
        all_sent = sentence_embed[:doc_length, :].unsqueeze(2)
        # Redundancy = max cosine similarity to any already-selected sentence.
        redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0
            ].cpu().numpy()
        score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias
        # Zero out already-picked sentences so they are not preferred again.
        for i_sel in selected:
            score[i_sel] = 0
    summary = '\n'.join(summary)
    return summary, prob, selected
def greedy_nommr(doc_length, px, sentence_embed, sentences, device,
    sentence_lengths, length_limit=200, lamb=0.2):
    """Greedy sentence selection by raw score only (no MMR redundancy term).

    Baseline counterpart of greedy_max: repeatedly takes the argmax sentence
    until the accumulated lengths exceed length_limit.

    Args:
        doc_length: number of sentences in the document.
        px: per-sentence relevance scores; a torch tensor (moved to numpy below).
        sentence_embed: unused here (kept for signature parity with greedy_max).
        sentences: list of sentence strings.
        device: unused.
        sentence_lengths: per-sentence length used for the word-count budget.
        length_limit: stop once the selected lengths sum past this.
        lamb: unused (signature parity).

    Returns:
        (summary, prob, selected): '\\n'-joined summary, the constant 1, and
        the list of selected indices.
    """
    x = list(range(doc_length))  # unused
    px = px.cpu().numpy()
    # score aliases the numpy array px: zeroing entries below mutates it too.
    score = px
    prob = 1  # kept constant; returned for interface compatibility
    bias = np.ones(px.shape)  # unused
    summary_representation = []  # unused
    selected = []
    wc = 0  # running length of the selected sentences
    lengths = []  # unused beyond appends
    summary = []
    # NOTE(review): once all scores are zeroed, np.argmax returns index 0, so
    # the same sentence can be appended repeatedly until the limit is hit.
    while wc <= length_limit:
        sample = np.argmax(score)
        selected.append(sample)
        wc += sentence_lengths[sample]
        lengths.append(sentence_lengths[sample])
        summary.append(sentences[sample])
        # Suppress already-picked sentences for the next argmax.
        for i_sel in selected:
            score[i_sel] = 0
    summary = '\n'.join(summary)
    return summary, prob, selected
def compute_reward(score_batch, input_lengths, output, sentences_batch,
    reference_batch, device, sentence_lengths_batch, number_of_sample=5,
    lamb=0.1):
    """Compute a per-document RL reward and selection labels for a batch.

    For each document, builds two summaries — greedy_nommr (baseline) and
    greedy_max (MMR) — scores both against the reference with
    get_rouge_single (imported from utils; presumably a ROUGE score — TODO
    confirm), and uses the difference (MMR minus baseline) as the reward.
    Sentences chosen by greedy_max are marked 1 in the label tensor.

    Args:
        score_batch: per-sentence scores, indexed [doc, sentence].
        input_lengths: number of sentences per document.
        output: sentence embeddings, indexed [sentence, doc, embed_dim].
        sentences_batch: per-document lists of sentence strings.
        reference_batch: per-document reference summaries.
        device: torch device the returned tensors are moved to.
        sentence_lengths_batch: per-document sentence lengths.
        number_of_sample: unused in this implementation.
        lamb: relevance/redundancy weight forwarded to both greedy selectors.

    Returns:
        (reward_batch, rl_label_batch): rewards shaped [1, batch] with
        requires_grad disabled, and 0/1 labels shaped like output[:, :, :1].
    """
    reward_batch = []
    # One label per (sentence, doc): [max_sentences, batch, 1].
    rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
    for i_data in range(len(input_lengths)):
        doc_length = input_lengths[i_data]
        scores = score_batch[i_data, :doc_length]
        sentence_lengths = sentence_lengths_batch[i_data]
        sentence_embed = output[:doc_length, i_data, :]
        sentences = sentences_batch[i_data]
        reference = reference_batch[i_data]
        # Baseline summary: pure score-greedy selection.
        result, prob, selected = greedy_nommr(doc_length, scores,
            sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
        reward_greedy = get_rouge_single(result, reference)
        # MMR summary: redundancy-penalized greedy selection.
        result, prob, selected = greedy_max(doc_length, scores,
            sentence_embed, sentences, device, sentence_lengths, lamb=lamb)
        reward_hi = get_rouge_single(result, reference)
        final_choice = selected
        # Reward = improvement of the MMR summary over the baseline.
        reward_batch.append(reward_hi - reward_greedy)
        # Advanced indexing: mark every selected sentence of this doc with 1.
        rl_label_batch[final_choice, i_data, :] = 1
    reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
    rl_label_batch = rl_label_batch.to(device)
    reward_batch.requires_grad_(False)
    return reward_batch, rl_label_batch
<|reserved_special_token_1|>
from scipy.stats import rv_discrete
import torch
import torch.nn.functional as F
import numpy as np
from utils import *
def greedy_max(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):
'''
prob: sum should be 1
sentence embed: [doc_length, embed_dim]
'''
x = list(range(doc_length))
px = px.cpu().numpy()
score=px
prob = 1
summary_representation = []
bias = np.ones(px.shape)
selected = []
wc=0
lengths=[]
summary = []
while wc<=length_limit:
sample = np.argmax(score)
selected.append(sample)
wc+=sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
summary_representation.append(sentence_embed[sample])
s = torch.stack(summary_representation,1).unsqueeze(0)
all_sent = sentence_embed[:doc_length,:].unsqueeze(2)
redundancy_score =torch.max(F.cosine_similarity(all_sent,s,1),1)[0].cpu().numpy()
score = lamb*px - ((1-lamb)*redundancy_score) + (1-lamb)*bias
for i_sel in selected:
score[i_sel] = 0
# print(len(selected))
summary ='\n'.join(summary)
# summary_representation= summary_representation.to(device)
return summary, prob, selected
def greedy_nommr(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):
'''
prob: sum should be 1
sentence embed: [doc_length, embed_dim]
'''
x = list(range(doc_length))
px = px.cpu().numpy()
score=px
prob = 1
bias = np.ones(px.shape)
summary_representation = []
selected = []
wc=0
lengths = []
summary=[]
while wc<=length_limit:
sample = np.argmax(score)
selected.append(sample)
wc+=sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
for i_sel in selected:
score[i_sel] = 0
summary = '\n'.join(summary)
return summary, prob, selected
def compute_reward(score_batch,input_lengths,output,sentences_batch,reference_batch,device,sentence_lengths_batch,number_of_sample=5,lamb=0.1):
reward_batch = []
rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
for i_data in range(len(input_lengths)):
# summary_i = summary_embed[i_data]
doc_length = input_lengths[i_data]
scores = score_batch[i_data,:doc_length]
sentence_lengths = sentence_lengths_batch[i_data]
sentence_embed = output[:doc_length,i_data,:]
sentences = sentences_batch[i_data]
reference = reference_batch[i_data]
# final_choice = None
result,prob,selected = greedy_nommr(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)
reward_greedy = get_rouge_single(result,reference)
result,prob,selected = greedy_max(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)
reward_hi = get_rouge_single(result,reference)
final_choice = selected
# print(reward_hi-reward_greedy)
reward_batch.append(reward_hi-reward_greedy)
rl_label_batch[final_choice,i_data,:] = 1
reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
rl_label_batch = rl_label_batch.to(device)
reward_batch.requires_grad_(False)
return reward_batch,rl_label_batch
|
flexible
|
{
"blob_id": "cc6e827eec5256ce0dbe13958b6178c59bcd94a7",
"index": 8802,
"step-1": "<mask token>\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n",
"step-2": "<mask token>\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\n<mask token>\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = 
selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n",
"step-3": "<mask token>\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef greedy_nommr(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n bias = np.ones(px.shape)\n summary_representation = []\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = 
torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n",
"step-4": "from scipy.stats import rv_discrete\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom utils import *\n\n\ndef greedy_max(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n summary_representation = []\n bias = np.ones(px.shape)\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n summary_representation.append(sentence_embed[sample])\n s = torch.stack(summary_representation, 1).unsqueeze(0)\n all_sent = sentence_embed[:doc_length, :].unsqueeze(2)\n redundancy_score = torch.max(F.cosine_similarity(all_sent, s, 1), 1)[0\n ].cpu().numpy()\n score = lamb * px - (1 - lamb) * redundancy_score + (1 - lamb) * bias\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef greedy_nommr(doc_length, px, sentence_embed, sentences, device,\n sentence_lengths, length_limit=200, lamb=0.2):\n \"\"\"\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t\"\"\"\n x = list(range(doc_length))\n px = px.cpu().numpy()\n score = px\n prob = 1\n bias = np.ones(px.shape)\n summary_representation = []\n selected = []\n wc = 0\n lengths = []\n summary = []\n while wc <= length_limit:\n sample = np.argmax(score)\n selected.append(sample)\n wc += sentence_lengths[sample]\n lengths.append(sentence_lengths[sample])\n summary.append(sentences[sample])\n for i_sel in selected:\n score[i_sel] = 0\n summary = '\\n'.join(summary)\n return summary, prob, selected\n\n\ndef compute_reward(score_batch, input_lengths, output, sentences_batch,\n reference_batch, device, sentence_lengths_batch, 
number_of_sample=5,\n lamb=0.1):\n reward_batch = []\n rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n for i_data in range(len(input_lengths)):\n doc_length = input_lengths[i_data]\n scores = score_batch[i_data, :doc_length]\n sentence_lengths = sentence_lengths_batch[i_data]\n sentence_embed = output[:doc_length, i_data, :]\n sentences = sentences_batch[i_data]\n reference = reference_batch[i_data]\n result, prob, selected = greedy_nommr(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_greedy = get_rouge_single(result, reference)\n result, prob, selected = greedy_max(doc_length, scores,\n sentence_embed, sentences, device, sentence_lengths, lamb=lamb)\n reward_hi = get_rouge_single(result, reference)\n final_choice = selected\n reward_batch.append(reward_hi - reward_greedy)\n rl_label_batch[final_choice, i_data, :] = 1\n reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n rl_label_batch = rl_label_batch.to(device)\n reward_batch.requires_grad_(False)\n return reward_batch, rl_label_batch\n",
"step-5": "from scipy.stats import rv_discrete\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nfrom utils import *\n\n\ndef greedy_max(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):\n\t'''\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t'''\n\tx = list(range(doc_length))\n\tpx = px.cpu().numpy()\n\tscore=px\n\tprob = 1\n\tsummary_representation = []\n\tbias = np.ones(px.shape)\n\tselected = []\n\twc=0\n\tlengths=[]\n\tsummary = []\n\twhile wc<=length_limit:\n\t\tsample = np.argmax(score)\n\n\t\tselected.append(sample)\n\t\twc+=sentence_lengths[sample]\n\t\tlengths.append(sentence_lengths[sample])\n\t\tsummary.append(sentences[sample])\n\n\t\tsummary_representation.append(sentence_embed[sample])\n\t\ts = torch.stack(summary_representation,1).unsqueeze(0)\n\t\tall_sent = sentence_embed[:doc_length,:].unsqueeze(2)\n\t\tredundancy_score =torch.max(F.cosine_similarity(all_sent,s,1),1)[0].cpu().numpy()\n\n\t\tscore = lamb*px - ((1-lamb)*redundancy_score) + (1-lamb)*bias\n\t\tfor i_sel in selected:\n\t\t\tscore[i_sel] = 0\n\t\t# print(len(selected))\n\tsummary ='\\n'.join(summary)\n\t# summary_representation= summary_representation.to(device)\n\treturn summary, prob, selected\n\n\ndef greedy_nommr(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):\n\t'''\n\tprob: sum should be 1\n\tsentence embed: [doc_length, embed_dim]\n\t'''\n\tx = list(range(doc_length))\n\tpx = px.cpu().numpy()\n\tscore=px\n\tprob = 1\n\tbias = np.ones(px.shape)\n\tsummary_representation = []\n\n\tselected = []\n\twc=0\n\tlengths = []\n\tsummary=[]\n\twhile wc<=length_limit:\n\n\t\tsample = np.argmax(score)\n\t\tselected.append(sample)\n\t\twc+=sentence_lengths[sample]\n\t\tlengths.append(sentence_lengths[sample])\n\t\tsummary.append(sentences[sample])\n\n\t\tfor i_sel in selected:\n\t\t\tscore[i_sel] = 0\n\tsummary = '\\n'.join(summary)\n\treturn summary, prob, 
selected\n\n\ndef compute_reward(score_batch,input_lengths,output,sentences_batch,reference_batch,device,sentence_lengths_batch,number_of_sample=5,lamb=0.1):\n\treward_batch = []\n\trl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)\n\tfor i_data in range(len(input_lengths)):\n\t\t# summary_i = summary_embed[i_data]\n\t\tdoc_length = input_lengths[i_data]\n\t\tscores = score_batch[i_data,:doc_length]\n\t\tsentence_lengths = sentence_lengths_batch[i_data]\n\t\tsentence_embed = output[:doc_length,i_data,:]\n\t\tsentences = sentences_batch[i_data]\n\t\treference = reference_batch[i_data]\n\n\t\t# final_choice = None\n\t\tresult,prob,selected = greedy_nommr(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)\n\t\treward_greedy = get_rouge_single(result,reference)\n\n\t\tresult,prob,selected = greedy_max(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)\n\t\treward_hi = get_rouge_single(result,reference)\n\t\tfinal_choice = selected\n\n\t\t# print(reward_hi-reward_greedy)\n\t\treward_batch.append(reward_hi-reward_greedy)\n\t\trl_label_batch[final_choice,i_data,:] = 1\n\n\treward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)\n\trl_label_batch = rl_label_batch.to(device)\n\treward_batch.requires_grad_(False)\n\n\treturn reward_batch,rl_label_batch\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Kontrollülesanne 7.4c - Elutee number (tähtaeg 28.okt. (incl))
Maksimaalne failide arv: 1
Töö liik: Individuaaltöö
Numeroloogias peetakse tähtsaks elutee numbrit, mille arvutamiseks tuleb liita kokku sünnikuupäeva ja -aasta numbrid
nii, et jõutakse lõpuks ühe numbrini.
Näiteks, oletame, et sünnikuupäev on 15.05.1975. Teha tuleb niisiis järgnev tehe: 1+5+5+1+9+7+5 = 33, 3+3 = 6, seega on
elutee number 6.
Aga kui sünnikuupäevaks on nt. 17.11.1981, siis arvutada tuleb järgmiselt: 1+7+1+1+1+9+8+1 = 29, 2+9 = 11, 1+1=2.
Elutee numbrit arvutab järgmine (rekursiivne) funktsioon, mis võtab argumendiks sünnikuupäeva:
#argument s on sõne, esialgu see on kuupäev, edasi juba arvutatud arv
def elutee(s):
#abimuutaja numbri arvutamiseks
n = 0
# tsükkel, mis vaatab iga sümboli sõnes
for i in s:
if i != ".":
n += int(i) # arvutame summat
# kui saadud arv on väiksem kui 10, siis ongi elutee number käes
if n < 10:
return n
# kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,
#selleks kasutame jälle sama funktsiooni
else:
return elutee(str(n))
Failis sunnikuupaevad.txt on mingi hulk sünnikuupäevi, iga sünnikuupäev eraldi real. Kirjutada programm, mis tekitab
selle faili põhjal 9 tekstifaili nimedega eluteenumber1.txt, eluteenumber2.txt, ..., eluteenumber9.txt ning jagab
sünnikuupäevad nendesse failidesse vastavalt elutee numbrile (elutee numbri arvutamiseks kasutada funktsiooni elutee).
Näiteks sünnikuupäev 15.05.1975 tuleb kirjutada faili eluteenumber6.txt.
Näide programmi tööst:
Kui faili sunnikuupaevad.txt sisu on
07.02.1969
17.11.1981
29.03.1955
siis faili eluteenumber7.txt sisu peab olema
07.02.1969
29.03.1955
ja faili eluteenumber2.txt sisu peab olema
17.11.1981
Kõik ülejäänud 7 faili peavad selle näite korral küll tekkima, aga jääma tühjaks.
"""
def elutee(s):
    """Return the numerological "life path" number for a birth date.

    *s* is a string such as "15.05.1975" (or, on recursive calls, an
    already-computed digit sum).  Every character except "." is treated
    as a digit; the digits are summed and the process repeats until a
    single-digit number remains, which is returned.
    """
    # Sum every digit in the string, skipping the "." date separators.
    total = sum(int(ch) for ch in s if ch != ".")
    # A single-digit sum is the final life-path number; otherwise reduce
    # the sum again by recursing on its decimal representation.
    return total if total < 10 else elutee(str(total))
# Pre-create all nine output files so the ones that receive no birth
# dates still exist (empty).  "a" (append) preserves any existing
# content, matching the original behaviour; the `with` block closes each
# handle immediately (the original leaked all nine open handles).
for number in range(1, 10):
    with open("eluteenumber" + str(number) + ".txt", "a", encoding="UTF-8"):
        pass

# Route every birth date in sunnikuupaevad.txt to the output file that
# matches its life-path number.  Distinct variable names avoid the
# original bug where the input handle `file` was rebound inside its own
# iteration loop, leaking the source file and making the trailing
# close() hit an already-closed output handle instead.
with open("sunnikuupaevad.txt", encoding="UTF-8") as source:
    for row in source:
        target_name = "eluteenumber" + str(elutee(row.strip())) + ".txt"
        with open(target_name, "a", encoding="UTF-8") as target:
            # `row` still carries its newline, so dates stay one per line.
            target.write(row)
|
normal
|
{
"blob_id": "971187dc0e0f02282c8945940d07c011e247667a",
"index": 9401,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\nfor i in range(1, 10):\n fileName = 'eluteenumber' + str(i) + '.txt'\n f = open(fileName, 'a')\n<mask token>\nfor row in file:\n fileName = 'eluteenumber' + str(elutee(row.strip())) + '.txt'\n file = open(fileName, 'a', encoding='UTF-8')\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-4": "<mask token>\n\n\ndef elutee(s):\n n = 0\n for i in s:\n if i != '.':\n n += int(i)\n if n < 10:\n return n\n else:\n return elutee(str(n))\n\n\nfor i in range(1, 10):\n fileName = 'eluteenumber' + str(i) + '.txt'\n f = open(fileName, 'a')\nfile = open('sunnikuupaevad.txt', encoding='UTF-8')\nfor row in file:\n fileName = 'eluteenumber' + str(elutee(row.strip())) + '.txt'\n file = open(fileName, 'a', encoding='UTF-8')\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-5": "\"\"\"\nKontrollülesanne 7.4c - Elutee number (tähtaeg 28.okt. (incl))\nMaksimaalne failide arv: 1\nTöö liik: Individuaaltöö\n\n\nNumeroloogias peetakse tähtsaks elutee numbrit, mille arvutamiseks tuleb liita kokku sünnikuupäeva ja -aasta numbrid\nnii, et jõutakse lõpuks ühe numbrini.\n\nNäiteks, oletame, et sünnikuupäev on 15.05.1975. Teha tuleb niisiis järgnev tehe: 1+5+5+1+9+7+5 = 33, 3+3 = 6, seega on\nelutee number 6.\n\nAga kui sünnikuupäevaks on nt. 17.11.1981, siis arvutada tuleb järgmiselt: 1+7+1+1+1+9+8+1 = 29, 2+9 = 11, 1+1=2.\n\nElutee numbrit arvutab järgmine (rekursiivne) funktsioon, mis võtab argumendiks sünnikuupäeva:\n\n#argument s on sõne, esialgu see on kuupäev, edasi juba arvutatud arv\ndef elutee(s):\n #abimuutaja numbri arvutamiseks\n n = 0\n # tsükkel, mis vaatab iga sümboli sõnes\n for i in s:\n if i != \".\":\n n += int(i) # arvutame summat\n # kui saadud arv on väiksem kui 10, siis ongi elutee number käes\n if n < 10:\n return n\n # kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,\n #selleks kasutame jälle sama funktsiooni\n else:\n return elutee(str(n))\nFailis sunnikuupaevad.txt on mingi hulk sünnikuupäevi, iga sünnikuupäev eraldi real. 
Kirjutada programm, mis tekitab\nselle faili põhjal 9 tekstifaili nimedega eluteenumber1.txt, eluteenumber2.txt, ..., eluteenumber9.txt ning jagab\nsünnikuupäevad nendesse failidesse vastavalt elutee numbrile (elutee numbri arvutamiseks kasutada funktsiooni elutee).\nNäiteks sünnikuupäev 15.05.1975 tuleb kirjutada faili eluteenumber6.txt.\n\nNäide programmi tööst:\n\nKui faili sunnikuupaevad.txt sisu on\n\n 07.02.1969\n 17.11.1981\n 29.03.1955\nsiis faili eluteenumber7.txt sisu peab olema\n\n 07.02.1969\n 29.03.1955\nja faili eluteenumber2.txt sisu peab olema\n\n 17.11.1981\nKõik ülejäänud 7 faili peavad selle näite korral küll tekkima, aga jääma tühjaks.\n\"\"\"\n\ndef elutee(s):\n #abimuutaja numbri arvutamiseks\n n = 0\n # tsükkel, mis vaatab iga sümboli sõnes\n for i in s:\n if i != \".\":\n n += int(i) # arvutame summat\n # kui saadud arv on väiksem kui 10, siis ongi elutee number käes\n if n < 10:\n return n\n # kui saadud arv on 10 või suurem, siis on vaja uuesti arvutada,\n #selleks kasutame jälle sama funktsiooni\n else:\n return elutee(str(n))\n\nfor i in range(1,10):\n fileName = \"eluteenumber\" + str(i) + \".txt\"\n f = open(fileName, \"a\")\n\n# inputFile = input(\"Palun sisestage sünnikuupäevade faili nimi: \") TEST EI TAHA FAILI SISESTAMIST NÄHAGI!\nfile = open(\"sunnikuupaevad.txt\", encoding=\"UTF-8\")\n\nfor row in file:\n fileName = \"eluteenumber\" + str(elutee(row.strip())) + \".txt\"\n file = open(fileName, \"a\", encoding=\"UTF-8\")\n file.write(str(row))\n file.close()\nfile.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Trivia game made by mayu xD
# Interactive quiz about basic Linux/Termux commands: five multiple-choice
# questions, +10 points per correct answer, -5 per wrong one.
print('¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas')
n1 = input('\n por favor dime como te llamas:')
print('\nmucho gusto', n1, ',empecemos')
# Running score, updated after every question.
puntaje = 0
# Question 1 — correct answer: b) ls
print('me puedes decir con que comando en linux puedo listar la informacion de un directorio?')
print('a)cd')
print('b) ls')
print('c) cat')
print('d) mv')
print('e) rm')
respuesta_1 = input('\n tu respuesta: ')
# Re-prompt until the reply is one of the five valid option letters.
while respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):
    respuesta_1 = input("debes volver a ingresar tu respuesta:")
if respuesta_1 == "b":
    puntaje += 10
    print("Muy bien", n1, "!")
else:
    puntaje -= 5
    print("Incorrecto", n1, "!")
# Question 2 — correct answer: c) mv (each wrong option gets its own message)
print('\nsiguiente pregunta')
print('\ncual de estos comandos sirve para mover un archivo en termux')
print('a) cd')
print('b) cp')
print('c) mv')
print('d) cat')
print('e) chmod')
respuesta_2 = input('tu respuesta: ')
while respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):
    respuesta_2 = input("debes volver a ingresar tu respuesta:")
if respuesta_2 == "b":
    puntaje -= 5
    print('incorrecto', n1, '!')
elif respuesta_2 == "a":
    puntaje -= 5
    print('mal', n1, ', incorreto')
elif respuesta_2 == "d":
    puntaje -= 5
    print('no', n1, '! incorrecto')
elif respuesta_2 == "e":
    puntaje -= 5
    print('mal', n1, '! incorrecto')
else:
    puntaje += 10
    print('correcto', n1, '!!!!')
# Question 3 — correct answer: a) chmod
print('\nsiguiente pregunta')
print('\nque comando puede dar permisos?')
print('a) chmod')
print('b) cal')
print('c) rm')
print('d) mkdir')
print('e) ls -l')
respuesta_3 = input('\n tu respuesta: ')
while respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):
    respuesta_3 = input("debes volver a ingresar tu respuesta:")
if respuesta_3 == "a":
    puntaje += 10
    print("Muy bien", n1, "!")
else:
    puntaje -= 5
    print("Incorrecto", n1, "!")
# Question 4 — correct answer: d) mkdir
print('\nsiguiente pregunta')
print('\ncual de estos comandos puede crear un directorio?')
print('a) rm')
print('b) mv')
print('c) cp')
print('d) mkdir')
print('e) exit')
respuesta_4 = input('\n tu respuesta: ')
while respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):
    respuesta_4 = input("debes volver a ingresar tu respuesta:")
if respuesta_4 == "d":
    puntaje += 10
    print("Muy bien", n1, "!")
else:
    puntaje -= 5
    print("Incorrecto", n1, "!")
# Question 5 — correct answer: d) termux setup-storage
print('\nsiguiente pregunta')
print('\ncon que comando puedo dar permisos e almacenaminto a termux?')
print('a) pwd')
print('b) ls -a')
print('c) lstree')
print('d) temux setup-storage')
print('e) rm -rf')
respuesta_5 = input('\n tu respuesta: ')
while respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):
    respuesta_5 = input("debes volver a ingresar tu respuesta:")
if respuesta_5 == "d":
    puntaje += 10
    print("Muy bien", n1, "!")
else:
    puntaje -= 5
    print("Incorrecto", n1, "!")
# Final score summary.
print('\ngracias por jugar', n1, '!')
print('\neste es tu puntaje:')
print('tienes', puntaje , 'puntos')
print('\nchao, chuidate xd')
|
normal
|
{
"blob_id": "0c297e6f79682896e98c7a2933a4da6d9af7d7fe",
"index": 9060,
"step-1": "<mask token>\n",
"step-2": "print(\n '¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'\n )\n<mask token>\nprint('\\nmucho gusto', n1, ',empecemos')\n<mask token>\nprint(\n 'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'\n )\nprint('a)cd')\nprint('b) ls')\nprint('c) cat')\nprint('d) mv')\nprint('e) rm')\n<mask token>\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_1 = input('debes volver a ingresar tu respuesta:')\nif respuesta_1 == 'b':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos sirve para mover un archivo en termux\"\"\")\nprint('a) cd')\nprint('b) cp')\nprint('c) mv')\nprint('d) cat')\nprint('e) chmod')\n<mask token>\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_2 = input('debes volver a ingresar tu respuesta:')\nif respuesta_2 == 'b':\n puntaje -= 5\n print('incorrecto', n1, '!')\nelif respuesta_2 == 'a':\n puntaje -= 5\n print('mal', n1, ', incorreto')\nelif respuesta_2 == 'd':\n puntaje -= 5\n print('no', n1, '! incorrecto')\nelif respuesta_2 == 'e':\n puntaje -= 5\n print('mal', n1, '! 
incorrecto')\nelse:\n puntaje += 10\n print('correcto', n1, '!!!!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\nque comando puede dar permisos?\"\"\")\nprint('a) chmod')\nprint('b) cal')\nprint('c) rm')\nprint('d) mkdir')\nprint('e) ls -l')\n<mask token>\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_3 = input('debes volver a ingresar tu respuesta:')\nif respuesta_3 == 'a':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos puede crear un directorio?\"\"\")\nprint('a) rm')\nprint('b) mv')\nprint('c) cp')\nprint('d) mkdir')\nprint('e) exit')\n<mask token>\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_4 = input('debes volver a ingresar tu respuesta:')\nif respuesta_4 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncon que comando puedo dar permisos e almacenaminto a termux?\"\"\")\nprint('a) pwd')\nprint('b) ls -a')\nprint('c) lstree')\nprint('d) temux setup-storage')\nprint('e) rm -rf')\n<mask token>\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_5 = input('debes volver a ingresar tu respuesta:')\nif respuesta_5 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\nprint('Incorrecto', n1, '!')\nprint(\"\"\"\ngracias por jugar\"\"\", n1, '!')\nprint(\"\"\"\neste es tu puntaje:\"\"\")\nprint('tienes', puntaje, 'puntos')\nprint(\"\"\"\nchao, chuidate xd\"\"\")\n",
"step-3": "print(\n '¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas'\n )\nn1 = input(\"\"\"\n por favor dime como te llamas:\"\"\")\nprint('\\nmucho gusto', n1, ',empecemos')\npuntaje = 0\nprint(\n 'me puedes decir con que comando en linux puedo listar la informacion de un directorio?'\n )\nprint('a)cd')\nprint('b) ls')\nprint('c) cat')\nprint('d) mv')\nprint('e) rm')\nrespuesta_1 = input('\\n tu respuesta: ')\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_1 = input('debes volver a ingresar tu respuesta:')\nif respuesta_1 == 'b':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos sirve para mover un archivo en termux\"\"\")\nprint('a) cd')\nprint('b) cp')\nprint('c) mv')\nprint('d) cat')\nprint('e) chmod')\nrespuesta_2 = input('tu respuesta: ')\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_2 = input('debes volver a ingresar tu respuesta:')\nif respuesta_2 == 'b':\n puntaje -= 5\n print('incorrecto', n1, '!')\nelif respuesta_2 == 'a':\n puntaje -= 5\n print('mal', n1, ', incorreto')\nelif respuesta_2 == 'd':\n puntaje -= 5\n print('no', n1, '! incorrecto')\nelif respuesta_2 == 'e':\n puntaje -= 5\n print('mal', n1, '! 
incorrecto')\nelse:\n puntaje += 10\n print('correcto', n1, '!!!!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\nque comando puede dar permisos?\"\"\")\nprint('a) chmod')\nprint('b) cal')\nprint('c) rm')\nprint('d) mkdir')\nprint('e) ls -l')\nrespuesta_3 = input('\\n tu respuesta: ')\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_3 = input('debes volver a ingresar tu respuesta:')\nif respuesta_3 == 'a':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncual de estos comandos puede crear un directorio?\"\"\")\nprint('a) rm')\nprint('b) mv')\nprint('c) cp')\nprint('d) mkdir')\nprint('e) exit')\nrespuesta_4 = input('\\n tu respuesta: ')\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_4 = input('debes volver a ingresar tu respuesta:')\nif respuesta_4 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\n print('Incorrecto', n1, '!')\nprint(\"\"\"\nsiguiente pregunta\"\"\")\nprint(\"\"\"\ncon que comando puedo dar permisos e almacenaminto a termux?\"\"\")\nprint('a) pwd')\nprint('b) ls -a')\nprint('c) lstree')\nprint('d) temux setup-storage')\nprint('e) rm -rf')\nrespuesta_5 = input('\\n tu respuesta: ')\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'):\n respuesta_5 = input('debes volver a ingresar tu respuesta:')\nif respuesta_5 == 'd':\n puntaje += 10\n print('Muy bien', n1, '!')\nelse:\n puntaje -= 5\nprint('Incorrecto', n1, '!')\nprint(\"\"\"\ngracias por jugar\"\"\", n1, '!')\nprint(\"\"\"\neste es tu puntaje:\"\"\")\nprint('tienes', puntaje, 'puntos')\nprint(\"\"\"\nchao, chuidate xd\"\"\")\n",
"step-4": "#juego trivia hecho por mayu xD\r\nprint('¡hola! te invito a jugar mi juego trivia, trataremos temas como termux xd y entre otras cosas')\r\nn1 = input('\\n por favor dime como te llamas:')\r\nprint('\\nmucho gusto', n1, ',empecemos')\r\npuntaje = 0\r\nprint('me puedes decir con que comando en linux puedo listar la informacion de un directorio?')\r\nprint('a)cd')\r\nprint('b) ls')\r\nprint('c) cat')\r\nprint('d) mv')\r\nprint('e) rm')\r\nrespuesta_1 = input('\\n tu respuesta: ')\r\nwhile respuesta_1 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_1 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_1 == \"b\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n \r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncual de estos comandos sirve para mover un archivo en termux')\r\nprint('a) cd')\r\nprint('b) cp')\r\nprint('c) mv')\r\nprint('d) cat')\r\nprint('e) chmod')\r\nrespuesta_2 = input('tu respuesta: ')\r\nwhile respuesta_2 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_2 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_2 == \"b\":\r\n puntaje -= 5\r\n print('incorrecto', n1, '!')\r\nelif respuesta_2 == \"a\":\r\n puntaje -= 5\r\n print('mal', n1, ', incorreto')\r\nelif respuesta_2 == \"d\":\r\n puntaje -= 5\r\n print('no', n1, '! incorrecto')\r\nelif respuesta_2 == \"e\":\r\n puntaje -= 5\r\n print('mal', n1, '! 
incorrecto')\r\nelse:\r\n puntaje += 10\r\n print('correcto', n1, '!!!!')\r\n\r\n\r\n \r\nprint('\\nsiguiente pregunta')\r\nprint('\\nque comando puede dar permisos?')\r\nprint('a) chmod')\r\nprint('b) cal')\r\nprint('c) rm')\r\nprint('d) mkdir')\r\nprint('e) ls -l')\r\nrespuesta_3 = input('\\n tu respuesta: ')\r\nwhile respuesta_3 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_3 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_3 == \"a\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n\r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncual de estos comandos puede crear un directorio?')\r\nprint('a) rm')\r\nprint('b) mv')\r\nprint('c) cp')\r\nprint('d) mkdir')\r\nprint('e) exit')\r\n\r\nrespuesta_4 = input('\\n tu respuesta: ')\r\nwhile respuesta_4 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_4 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_4 == \"d\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n print(\"Incorrecto\", n1, \"!\")\r\n\r\nprint('\\nsiguiente pregunta')\r\nprint('\\ncon que comando puedo dar permisos e almacenaminto a termux?')\r\nprint('a) pwd')\r\nprint('b) ls -a')\r\nprint('c) lstree')\r\nprint('d) temux setup-storage')\r\nprint('e) rm -rf')\r\n\r\nrespuesta_5 = input('\\n tu respuesta: ')\r\nwhile respuesta_5 not in ('a', 'b', 'c', 'd', 'e'): \r\n respuesta_5 = input(\"debes volver a ingresar tu respuesta:\")\r\nif respuesta_5 == \"d\":\r\n puntaje += 10\r\n print(\"Muy bien\", n1, \"!\")\r\nelse:\r\n puntaje -= 5\r\n\r\nprint(\"Incorrecto\", n1, \"!\")\r\nprint('\\ngracias por jugar', n1, '!')\r\nprint('\\neste es tu puntaje:')\r\nprint('tienes', puntaje , 'puntos')\r\nprint('\\nchao, chuidate xd')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import urllib.request
import io
import cv2
import numpy as np

# Endpoint that serves one encoded image (e.g. JPEG/PNG) per request.
img_url = 'http://192.168.0.2:7079/hi'

# Poll the endpoint and display each frame until 'q' is pressed.
while True:
    # Context manager closes the HTTP response deterministically; the
    # original leaked one response object per iteration.
    with urllib.request.urlopen(img_url) as response:
        raw_data = response.read()
    # imdecode expects an unsigned byte buffer; use np.uint8 instead of
    # the signed np.byte (int8) the original passed.
    nparr = np.frombuffer(raw_data, np.uint8)
    image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
    # imdecode returns None on corrupt/truncated data; skip such frames
    # instead of letting imshow raise.
    if image_raw is None:
        continue
    cv2.imshow("test", image_raw)
    if cv2.waitKey(1) == ord('q'):
        break

cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "c120db53e1ea5a5b865b891cf602a13113fb1e41",
"index": 4113,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "import urllib.request\nimport io\nimport cv2\nimport numpy as np\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow('test', image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "import urllib.request\nimport io\nimport cv2\nimport numpy as np\n\nimg_url = 'http://192.168.0.2:7079/hi'\nwhile True:\n data = urllib.request.urlopen(img_url)\n raw_data = data.read()\n\n nparr = np.frombuffer(raw_data, np.byte)\n image_raw = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)\n cv2.imshow(\"test\", image_raw)\n if cv2.waitKey(1) == ord('q'):\n break\n\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@colander.deferred
def deferred_country_widget(node, kw):
country_codes_data = kw.get('country_codes_data', [])
return widget.Select2Widget(values=country_codes_data)
<|reserved_special_token_0|>
@colander.deferred
def deferred_title_prefix_widget(node, kw):
title_prefix_data = kw.get('title_prefix_data', [])
return widget.Select2Widget(values=title_prefix_data)
<|reserved_special_token_0|>
class AddAccountSchema(colander.Schema):
"""
"""
cou = colander.SchemaNode(colander.Boolean(), title=
'Security and Acceptable Use Policy Acceptance', description=
"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies."
, widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')
stor = colander.SchemaNode(colander.Boolean(), title=
'Data Security Policy Acceptance', description=
"Check this if you have read and agree to the Center's storage policies."
, widget=deform.widget.CheckboxWidget(), validator=stor_validator,
oid='stor')
givenName = colander.SchemaNode(colander.String(), title=
'Given/First name', description='Your given or first name',
validator=colander.Length(min=1, max=64), widget=widget.
TextInputWidget(placeholder=''), oid='givenName')
middleName = colander.SchemaNode(colander.String(), title=
'Middle name/initial', description='Middle name or initial',
validator=colander.Length(min=0, max=64), widget=widget.
TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')
sn = colander.SchemaNode(colander.String(), title='Family/Last Name',
description='family Name / Last Name', validator=colander.Length(
min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'
)
suffix = colander.SchemaNode(colander.String(), title='Suffix',
description='(Sr. Jr. IV, etc.)', validator=colander.Length(min=0,
max=32), widget=widget.TextInputWidget(placeholder=
'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')
cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',
description='Your full name. How you want to be addressed.',
validator=colander.Length(min=3, max=64), widget=widget.
TextInputWidget(placeholder=
'(Optional) How you want to be addressed if different from: FirstName LastName'
), missing=unicode(''), oid='cn')
street = colander.SchemaNode(colander.String(), title='Street Address',
description='', validator=colander.Length(min=0, max=200), widget=
widget.TextInputWidget(placeholder='business/institution address'),
oid='street')
lcity = colander.SchemaNode(colander.String(), title='City',
description='', validator=colander.Length(min=1, max=128), widget=
widget.TextInputWidget(), oid='lcity')
st = colander.SchemaNode(colander.String(), title='State/Province',
description='', validator=colander.Length(min=1, max=128), widget=
widget.TextInputWidget(), oid='l')
postalCode = colander.SchemaNode(colander.String(), title=
'Post/ZIP Code', description='', validator=colander.Length(min=2,
max=64), widget=widget.TextInputWidget(), oid='postalCode')
country = colander.SchemaNode(colander.String(), title='Country',
description='', widget=widget.SelectWidget(values=country_codes),
validator=valid_country, oid='country')
mail = colander.SchemaNode(colander.String(), title='EMail',
description='Your primary email account', widget=
email_confirm_widget, oid='mail')
phone = colander.SchemaNode(colander.String(), title='Phone number',
description='Please provide your primary telephone number',
validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'
)
cell = colander.SchemaNode(colander.String(), title='Cell phone number',
description='For contact and verification', validator=
phone_validator, missing=unicode(''), widget=widget.TextInputWidget
(placeholder='(Optional) example: +1-000-000-0000'), oid='cell')
employerType = colander.SchemaNode(colander.String(), validator=
colander.OneOf([x[0] for x in employer_types]), widget=deform.
widget.RadioChoiceWidget(values=employer_types), title=
'Employer Type', description=
'Select the employer type from the list below that is most appropriate to your request'
, oid='employerType')
employerName = colander.SchemaNode(colander.String(), title=
'Employer, Institution, or Sponsor Name', description=
'Please provide the name of your employer or the institution you represent'
, validator=colander.Length(min=3, max=128), widget=widget.
TextInputWidget(placeholder='employer name here'), oid='employerName')
citizenStatus = colander.SchemaNode(colander.String(), title=
'Citizenship Status', description=
'Select one of the following options that best describes your U.S. citizenship status'
, validator=colander.OneOf([x[0] for x in citizen_types]), widget=
widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')
citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',
description=
'Please select your country or countries of citizenship', validator
=valid_countries, widget=widget.Select2Widget(values=country_codes,
multiple=True), oid='citizenships')
birthCountry = colander.SchemaNode(colander.String(), title=
'Country of birth', description=
'Please enter/select your country of birth', validator=
valid_country, widget=widget.Select2Widget(values=country_codes),
oid='birthCountry')
isnreluser = colander.SchemaNode(colander.String(), title=
'Existing NREL Account?', description=
'Select the option that is most true for you.', widget=deform.
widget.RadioChoiceWidget(values=has_account), missing=unicode(''),
label='Existing or Previous ESIF HPC UserID', oid='isnreluser')
nrelUserID = colander.SchemaNode(colander.String(), title=
'Your Existing NREL HPC UserID', description=
'If you have --or previously had-- an NREL UserID, enter it here.',
validator=colander.Length(min=1, max=16), widget=widget.
TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''
), oid='nrelUserID')
justification = colander.SchemaNode(colander.String(), title=
'NREL HPC User Credential Information', widget=widget.
TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=
colander.Length(max=1000), description=
"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials."
, oid='comments')
preferredUID = colander.SchemaNode(colander.String(), title=
'*New* ESIF HPC UserID', description=
'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'
, validator=colander.Length(min=3, max=16), widget=widget.
TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''
), oid='preferredUID')
comments = colander.SchemaNode(colander.String(), title=
'Additional Notes or Comments', widget=deform.widget.TextAreaWidget
(rows=6, columns=60, placeholder=
'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'
), missing=unicode(''), validator=colander.Length(max=1000),
description=
'If you think we need any additional information to process or approve your request, please let us know.'
, oid='comments')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@colander.deferred
def deferred_country_widget(node, kw):
country_codes_data = kw.get('country_codes_data', [])
return widget.Select2Widget(values=country_codes_data)
@colander.deferred
def deferred_state_widget(node, kw):
us_states_data = kw.get('us_states_data', [])
return widget.Select2Widget(values=us_states_data)
@colander.deferred
def deferred_title_prefix_widget(node, kw):
title_prefix_data = kw.get('title_prefix_data', [])
return widget.Select2Widget(values=title_prefix_data)
<|reserved_special_token_0|>
class AddAccountSchema(colander.Schema):
"""
"""
cou = colander.SchemaNode(colander.Boolean(), title=
'Security and Acceptable Use Policy Acceptance', description=
"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies."
, widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')
stor = colander.SchemaNode(colander.Boolean(), title=
'Data Security Policy Acceptance', description=
"Check this if you have read and agree to the Center's storage policies."
, widget=deform.widget.CheckboxWidget(), validator=stor_validator,
oid='stor')
givenName = colander.SchemaNode(colander.String(), title=
'Given/First name', description='Your given or first name',
validator=colander.Length(min=1, max=64), widget=widget.
TextInputWidget(placeholder=''), oid='givenName')
middleName = colander.SchemaNode(colander.String(), title=
'Middle name/initial', description='Middle name or initial',
validator=colander.Length(min=0, max=64), widget=widget.
TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')
sn = colander.SchemaNode(colander.String(), title='Family/Last Name',
description='family Name / Last Name', validator=colander.Length(
min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'
)
suffix = colander.SchemaNode(colander.String(), title='Suffix',
description='(Sr. Jr. IV, etc.)', validator=colander.Length(min=0,
max=32), widget=widget.TextInputWidget(placeholder=
'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')
cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',
description='Your full name. How you want to be addressed.',
validator=colander.Length(min=3, max=64), widget=widget.
TextInputWidget(placeholder=
'(Optional) How you want to be addressed if different from: FirstName LastName'
), missing=unicode(''), oid='cn')
street = colander.SchemaNode(colander.String(), title='Street Address',
description='', validator=colander.Length(min=0, max=200), widget=
widget.TextInputWidget(placeholder='business/institution address'),
oid='street')
lcity = colander.SchemaNode(colander.String(), title='City',
description='', validator=colander.Length(min=1, max=128), widget=
widget.TextInputWidget(), oid='lcity')
st = colander.SchemaNode(colander.String(), title='State/Province',
description='', validator=colander.Length(min=1, max=128), widget=
widget.TextInputWidget(), oid='l')
postalCode = colander.SchemaNode(colander.String(), title=
'Post/ZIP Code', description='', validator=colander.Length(min=2,
max=64), widget=widget.TextInputWidget(), oid='postalCode')
country = colander.SchemaNode(colander.String(), title='Country',
description='', widget=widget.SelectWidget(values=country_codes),
validator=valid_country, oid='country')
mail = colander.SchemaNode(colander.String(), title='EMail',
description='Your primary email account', widget=
email_confirm_widget, oid='mail')
phone = colander.SchemaNode(colander.String(), title='Phone number',
description='Please provide your primary telephone number',
validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'
)
cell = colander.SchemaNode(colander.String(), title='Cell phone number',
description='For contact and verification', validator=
phone_validator, missing=unicode(''), widget=widget.TextInputWidget
(placeholder='(Optional) example: +1-000-000-0000'), oid='cell')
employerType = colander.SchemaNode(colander.String(), validator=
colander.OneOf([x[0] for x in employer_types]), widget=deform.
widget.RadioChoiceWidget(values=employer_types), title=
'Employer Type', description=
'Select the employer type from the list below that is most appropriate to your request'
, oid='employerType')
employerName = colander.SchemaNode(colander.String(), title=
'Employer, Institution, or Sponsor Name', description=
'Please provide the name of your employer or the institution you represent'
, validator=colander.Length(min=3, max=128), widget=widget.
TextInputWidget(placeholder='employer name here'), oid='employerName')
citizenStatus = colander.SchemaNode(colander.String(), title=
'Citizenship Status', description=
'Select one of the following options that best describes your U.S. citizenship status'
, validator=colander.OneOf([x[0] for x in citizen_types]), widget=
widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')
citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',
description=
'Please select your country or countries of citizenship', validator
=valid_countries, widget=widget.Select2Widget(values=country_codes,
multiple=True), oid='citizenships')
birthCountry = colander.SchemaNode(colander.String(), title=
'Country of birth', description=
'Please enter/select your country of birth', validator=
valid_country, widget=widget.Select2Widget(values=country_codes),
oid='birthCountry')
isnreluser = colander.SchemaNode(colander.String(), title=
'Existing NREL Account?', description=
'Select the option that is most true for you.', widget=deform.
widget.RadioChoiceWidget(values=has_account), missing=unicode(''),
label='Existing or Previous ESIF HPC UserID', oid='isnreluser')
nrelUserID = colander.SchemaNode(colander.String(), title=
'Your Existing NREL HPC UserID', description=
'If you have --or previously had-- an NREL UserID, enter it here.',
validator=colander.Length(min=1, max=16), widget=widget.
TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''
), oid='nrelUserID')
justification = colander.SchemaNode(colander.String(), title=
'NREL HPC User Credential Information', widget=widget.
TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=
colander.Length(max=1000), description=
"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials."
, oid='comments')
preferredUID = colander.SchemaNode(colander.String(), title=
'*New* ESIF HPC UserID', description=
'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'
, validator=colander.Length(min=3, max=16), widget=widget.
TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''
), oid='preferredUID')
comments = colander.SchemaNode(colander.String(), title=
'Additional Notes or Comments', widget=deform.widget.TextAreaWidget
(rows=6, columns=60, placeholder=
'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'
), missing=unicode(''), validator=colander.Length(max=1000),
description=
'If you think we need any additional information to process or approve your request, please let us know.'
, oid='comments')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@colander.deferred
def deferred_country_widget(node, kw):
    """Deferred factory: country Select2 widget built from bind-time data."""
    choices = kw.get('country_codes_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_state_widget(node, kw):
    """Deferred factory: U.S. state Select2 widget built from bind-time data."""
    choices = kw.get('us_states_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_title_prefix_widget(node, kw):
    """Deferred factory: honorific-prefix Select2 widget from bind-time data."""
    choices = kw.get('title_prefix_data', [])
    return widget.Select2Widget(values=choices)
# Double-entry (type-it-twice) widget for the required primary email field.
email_confirm_widget = deform.widget.CheckedInputWidget(subject=
    'Email address', confirm_subject='Confirm your Email address')
# Double-entry widget for the optional preferred-email field.
pref_email_confirm_widget = deform.widget.CheckedInputWidget(subject=
    'Optional Preferred Email', confirm_subject=
    'Confirm your optional Email address')
# Text input carrying the 'form-control' CSS class.
# NOTE(review): sn_widget is not referenced by the schema below — confirm it is used elsewhere.
sn_widget = widget.TextInputWidget(css_class='form-control')
class AddAccountSchema(colander.Schema):
    """ESIF HPC account-request form schema.

    Each class attribute is a colander SchemaNode rendered with a deform
    widget.  Optional fields use ``missing=unicode('')`` (Python 2 code).
    """

    # --- policy acceptance -------------------------------------------
    cou = colander.SchemaNode(
        colander.Boolean(),
        title='Security and Acceptable Use Policy Acceptance',
        description="Terms and Conditions Agreement - Check this if you "
                    "have read and agree to abide by the Center's "
                    "Security and Acceptable Use Policies.",
        widget=widget.CheckboxWidget(),
        validator=cou_validator,
        oid='cou')

    stor = colander.SchemaNode(
        colander.Boolean(),
        title='Data Security Policy Acceptance',
        description="Check this if you have read and agree to the "
                    "Center's storage policies.",
        widget=deform.widget.CheckboxWidget(),
        validator=stor_validator,
        oid='stor')

    # --- name --------------------------------------------------------
    givenName = colander.SchemaNode(
        colander.String(),
        title='Given/First name',
        description='Your given or first name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='givenName')

    middleName = colander.SchemaNode(
        colander.String(),
        title='Middle name/initial',
        description='Middle name or initial',
        validator=colander.Length(min=0, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        missing=unicode(''),
        oid='middleName')

    sn = colander.SchemaNode(
        colander.String(),
        title='Family/Last Name',
        description='family Name / Last Name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='sn')

    suffix = colander.SchemaNode(
        colander.String(),
        title='Suffix',
        description='(Sr. Jr. IV, etc.)',
        validator=colander.Length(min=0, max=32),
        widget=widget.TextInputWidget(placeholder='example: III, PhD, etc.'),
        missing=unicode(''),
        oid='suffix')

    cn = colander.SchemaNode(
        colander.String(),
        title='Common or Nick Name',
        description='Your full name. How you want to be addressed.',
        validator=colander.Length(min=3, max=64),
        widget=widget.TextInputWidget(
            placeholder='(Optional) How you want to be addressed '
                        'if different from: FirstName LastName'),
        missing=unicode(''),
        oid='cn')

    # --- postal address ----------------------------------------------
    street = colander.SchemaNode(
        colander.String(),
        title='Street Address',
        description='',
        validator=colander.Length(min=0, max=200),
        widget=widget.TextInputWidget(
            placeholder='business/institution address'),
        oid='street')

    lcity = colander.SchemaNode(
        colander.String(),
        title='City',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='lcity')

    # NOTE(review): oid is 'l', not 'st' — confirm the mismatch is intended.
    st = colander.SchemaNode(
        colander.String(),
        title='State/Province',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='l')

    postalCode = colander.SchemaNode(
        colander.String(),
        title='Post/ZIP Code',
        description='',
        validator=colander.Length(min=2, max=64),
        widget=widget.TextInputWidget(),
        oid='postalCode')

    country = colander.SchemaNode(
        colander.String(),
        title='Country',
        description='',
        widget=widget.SelectWidget(values=country_codes),
        validator=valid_country,
        oid='country')

    # --- contact -----------------------------------------------------
    mail = colander.SchemaNode(
        colander.String(),
        title='EMail',
        description='Your primary email account',
        widget=email_confirm_widget,
        oid='mail')

    phone = colander.SchemaNode(
        colander.String(),
        title='Phone number',
        description='Please provide your primary telephone number',
        validator=phone_validator,
        widget=widget.TextInputWidget(),
        oid='phone')

    cell = colander.SchemaNode(
        colander.String(),
        title='Cell phone number',
        description='For contact and verification',
        validator=phone_validator,
        missing=unicode(''),
        widget=widget.TextInputWidget(
            placeholder='(Optional) example: +1-000-000-0000'),
        oid='cell')

    # --- employment --------------------------------------------------
    employerType = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf([x[0] for x in employer_types]),
        widget=deform.widget.RadioChoiceWidget(values=employer_types),
        title='Employer Type',
        description='Select the employer type from the list below that '
                    'is most appropriate to your request',
        oid='employerType')

    employerName = colander.SchemaNode(
        colander.String(),
        title='Employer, Institution, or Sponsor Name',
        description='Please provide the name of your employer or '
                    'the institution you represent',
        validator=colander.Length(min=3, max=128),
        widget=widget.TextInputWidget(placeholder='employer name here'),
        oid='employerName')

    # --- citizenship -------------------------------------------------
    citizenStatus = colander.SchemaNode(
        colander.String(),
        title='Citizenship Status',
        description='Select one of the following options that best '
                    'describes your U.S. citizenship status',
        validator=colander.OneOf([x[0] for x in citizen_types]),
        widget=widget.RadioChoiceWidget(values=citizen_types),
        oid='citizenStatus')

    citizenships = colander.SchemaNode(
        colander.Set(),
        title='Citizenships',
        description='Please select your country or countries of citizenship',
        validator=valid_countries,
        widget=widget.Select2Widget(values=country_codes, multiple=True),
        oid='citizenships')

    birthCountry = colander.SchemaNode(
        colander.String(),
        title='Country of birth',
        description='Please enter/select your country of birth',
        validator=valid_country,
        widget=widget.Select2Widget(values=country_codes),
        oid='birthCountry')

    # --- NREL HPC account information --------------------------------
    isnreluser = colander.SchemaNode(
        colander.String(),
        title='Existing NREL Account?',
        description='Select the option that is most true for you.',
        widget=deform.widget.RadioChoiceWidget(values=has_account),
        missing=unicode(''),
        label='Existing or Previous ESIF HPC UserID',
        oid='isnreluser')

    nrelUserID = colander.SchemaNode(
        colander.String(),
        title='Your Existing NREL HPC UserID',
        description='If you have --or previously had-- an NREL UserID, '
                    'enter it here.',
        validator=colander.Length(min=1, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='nrelUserID')

    # NOTE(review): oid 'comments' duplicates the oid of the comments
    # field below — confirm this is intended.
    justification = colander.SchemaNode(
        colander.String(),
        title='NREL HPC User Credential Information',
        widget=widget.TextAreaWidget(rows=6, columns=60),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description="If you don't have an account on NREL HPC systems, "
                    "we need some additional information. Please provide "
                    "the project handles or titles of the project "
                    "allocations you are associated with. If you don't "
                    "have an allocation, please tell us why you are "
                    "requesting NREL HPC login credentials.",
        oid='comments')

    preferredUID = colander.SchemaNode(
        colander.String(),
        title='*New* ESIF HPC UserID',
        description='Please provide your desired User ID here.<sup>1</sup>'
                    '(3 to 16 characters, all lower case.)',
        validator=colander.Length(min=3, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='preferredUID')

    comments = colander.SchemaNode(
        colander.String(),
        title='Additional Notes or Comments',
        widget=deform.widget.TextAreaWidget(
            rows=6, columns=60,
            placeholder='If you think we need any additional information '
                        'to process or approve your request, please let '
                        'us know (project name, PI, NREL contact, etc.).'),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description='If you think we need any additional information to '
                    'process or approve your request, please let us know.',
        oid='comments')
<|reserved_special_token_1|>
import deform
import deform.widget
from deform import widget
import colander
from validators import cyber_validator, phone_validator, stor_validator, cou_validator, valid_country, valid_countries
from .lists import title_prefixes, citizen_types, employer_types, country_codes, has_account
@colander.deferred
def deferred_country_widget(node, kw):
    """Deferred factory: country Select2 widget built from bind-time data."""
    choices = kw.get('country_codes_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_state_widget(node, kw):
    """Deferred factory: U.S. state Select2 widget built from bind-time data."""
    choices = kw.get('us_states_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_title_prefix_widget(node, kw):
    """Deferred factory: honorific-prefix Select2 widget from bind-time data."""
    choices = kw.get('title_prefix_data', [])
    return widget.Select2Widget(values=choices)
# Double-entry (type-it-twice) widget for the required primary email field.
email_confirm_widget = deform.widget.CheckedInputWidget(subject=
    'Email address', confirm_subject='Confirm your Email address')
# Double-entry widget for the optional preferred-email field.
pref_email_confirm_widget = deform.widget.CheckedInputWidget(subject=
    'Optional Preferred Email', confirm_subject=
    'Confirm your optional Email address')
# Text input carrying the 'form-control' CSS class.
# NOTE(review): sn_widget is not referenced by the schema below — confirm it is used elsewhere.
sn_widget = widget.TextInputWidget(css_class='form-control')
class AddAccountSchema(colander.Schema):
    """ESIF HPC account-request form schema.

    Each class attribute is a colander SchemaNode rendered with a deform
    widget.  Optional fields use ``missing=unicode('')`` (Python 2 code).
    """

    # --- policy acceptance -------------------------------------------
    cou = colander.SchemaNode(
        colander.Boolean(),
        title='Security and Acceptable Use Policy Acceptance',
        description="Terms and Conditions Agreement - Check this if you "
                    "have read and agree to abide by the Center's "
                    "Security and Acceptable Use Policies.",
        widget=widget.CheckboxWidget(),
        validator=cou_validator,
        oid='cou')

    stor = colander.SchemaNode(
        colander.Boolean(),
        title='Data Security Policy Acceptance',
        description="Check this if you have read and agree to the "
                    "Center's storage policies.",
        widget=deform.widget.CheckboxWidget(),
        validator=stor_validator,
        oid='stor')

    # --- name --------------------------------------------------------
    givenName = colander.SchemaNode(
        colander.String(),
        title='Given/First name',
        description='Your given or first name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='givenName')

    middleName = colander.SchemaNode(
        colander.String(),
        title='Middle name/initial',
        description='Middle name or initial',
        validator=colander.Length(min=0, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        missing=unicode(''),
        oid='middleName')

    sn = colander.SchemaNode(
        colander.String(),
        title='Family/Last Name',
        description='family Name / Last Name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='sn')

    suffix = colander.SchemaNode(
        colander.String(),
        title='Suffix',
        description='(Sr. Jr. IV, etc.)',
        validator=colander.Length(min=0, max=32),
        widget=widget.TextInputWidget(placeholder='example: III, PhD, etc.'),
        missing=unicode(''),
        oid='suffix')

    cn = colander.SchemaNode(
        colander.String(),
        title='Common or Nick Name',
        description='Your full name. How you want to be addressed.',
        validator=colander.Length(min=3, max=64),
        widget=widget.TextInputWidget(
            placeholder='(Optional) How you want to be addressed '
                        'if different from: FirstName LastName'),
        missing=unicode(''),
        oid='cn')

    # --- postal address ----------------------------------------------
    street = colander.SchemaNode(
        colander.String(),
        title='Street Address',
        description='',
        validator=colander.Length(min=0, max=200),
        widget=widget.TextInputWidget(
            placeholder='business/institution address'),
        oid='street')

    lcity = colander.SchemaNode(
        colander.String(),
        title='City',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='lcity')

    # NOTE(review): oid is 'l', not 'st' — confirm the mismatch is intended.
    st = colander.SchemaNode(
        colander.String(),
        title='State/Province',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='l')

    postalCode = colander.SchemaNode(
        colander.String(),
        title='Post/ZIP Code',
        description='',
        validator=colander.Length(min=2, max=64),
        widget=widget.TextInputWidget(),
        oid='postalCode')

    country = colander.SchemaNode(
        colander.String(),
        title='Country',
        description='',
        widget=widget.SelectWidget(values=country_codes),
        validator=valid_country,
        oid='country')

    # --- contact -----------------------------------------------------
    mail = colander.SchemaNode(
        colander.String(),
        title='EMail',
        description='Your primary email account',
        widget=email_confirm_widget,
        oid='mail')

    phone = colander.SchemaNode(
        colander.String(),
        title='Phone number',
        description='Please provide your primary telephone number',
        validator=phone_validator,
        widget=widget.TextInputWidget(),
        oid='phone')

    cell = colander.SchemaNode(
        colander.String(),
        title='Cell phone number',
        description='For contact and verification',
        validator=phone_validator,
        missing=unicode(''),
        widget=widget.TextInputWidget(
            placeholder='(Optional) example: +1-000-000-0000'),
        oid='cell')

    # --- employment --------------------------------------------------
    employerType = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf([x[0] for x in employer_types]),
        widget=deform.widget.RadioChoiceWidget(values=employer_types),
        title='Employer Type',
        description='Select the employer type from the list below that '
                    'is most appropriate to your request',
        oid='employerType')

    employerName = colander.SchemaNode(
        colander.String(),
        title='Employer, Institution, or Sponsor Name',
        description='Please provide the name of your employer or '
                    'the institution you represent',
        validator=colander.Length(min=3, max=128),
        widget=widget.TextInputWidget(placeholder='employer name here'),
        oid='employerName')

    # --- citizenship -------------------------------------------------
    citizenStatus = colander.SchemaNode(
        colander.String(),
        title='Citizenship Status',
        description='Select one of the following options that best '
                    'describes your U.S. citizenship status',
        validator=colander.OneOf([x[0] for x in citizen_types]),
        widget=widget.RadioChoiceWidget(values=citizen_types),
        oid='citizenStatus')

    citizenships = colander.SchemaNode(
        colander.Set(),
        title='Citizenships',
        description='Please select your country or countries of citizenship',
        validator=valid_countries,
        widget=widget.Select2Widget(values=country_codes, multiple=True),
        oid='citizenships')

    birthCountry = colander.SchemaNode(
        colander.String(),
        title='Country of birth',
        description='Please enter/select your country of birth',
        validator=valid_country,
        widget=widget.Select2Widget(values=country_codes),
        oid='birthCountry')

    # --- NREL HPC account information --------------------------------
    isnreluser = colander.SchemaNode(
        colander.String(),
        title='Existing NREL Account?',
        description='Select the option that is most true for you.',
        widget=deform.widget.RadioChoiceWidget(values=has_account),
        missing=unicode(''),
        label='Existing or Previous ESIF HPC UserID',
        oid='isnreluser')

    nrelUserID = colander.SchemaNode(
        colander.String(),
        title='Your Existing NREL HPC UserID',
        description='If you have --or previously had-- an NREL UserID, '
                    'enter it here.',
        validator=colander.Length(min=1, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='nrelUserID')

    # NOTE(review): oid 'comments' duplicates the oid of the comments
    # field below — confirm this is intended.
    justification = colander.SchemaNode(
        colander.String(),
        title='NREL HPC User Credential Information',
        widget=widget.TextAreaWidget(rows=6, columns=60),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description="If you don't have an account on NREL HPC systems, "
                    "we need some additional information. Please provide "
                    "the project handles or titles of the project "
                    "allocations you are associated with. If you don't "
                    "have an allocation, please tell us why you are "
                    "requesting NREL HPC login credentials.",
        oid='comments')

    preferredUID = colander.SchemaNode(
        colander.String(),
        title='*New* ESIF HPC UserID',
        description='Please provide your desired User ID here.<sup>1</sup>'
                    '(3 to 16 characters, all lower case.)',
        validator=colander.Length(min=3, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='preferredUID')

    comments = colander.SchemaNode(
        colander.String(),
        title='Additional Notes or Comments',
        widget=deform.widget.TextAreaWidget(
            rows=6, columns=60,
            placeholder='If you think we need any additional information '
                        'to process or approve your request, please let '
                        'us know (project name, PI, NREL contact, etc.).'),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description='If you think we need any additional information to '
                    'process or approve your request, please let us know.',
        oid='comments')
<|reserved_special_token_1|>
import deform
import deform.widget
from deform import (widget) # decorator, default_renderer, field, form,
import colander
# import htmllaundry
# from htmllaundry import sanitize
from validators import (cyber_validator,
phone_validator,
stor_validator,
cou_validator,
valid_country,
valid_countries)
from .lists import (title_prefixes,
citizen_types,
employer_types,
country_codes,
has_account,
)
@colander.deferred
def deferred_country_widget(node, kw):
    """Deferred factory: country Select2 widget built from bind-time data."""
    choices = kw.get('country_codes_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_state_widget(node, kw):
    """Deferred factory: U.S. state Select2 widget built from bind-time data."""
    choices = kw.get('us_states_data', [])
    return widget.Select2Widget(values=choices)
@colander.deferred
def deferred_title_prefix_widget(node, kw):
    """Deferred factory: honorific-prefix Select2 widget from bind-time data."""
    choices = kw.get('title_prefix_data', [])
    return widget.Select2Widget(values=choices)
# Double-entry (type-it-twice) widget for the required primary email field.
email_confirm_widget = deform.widget.CheckedInputWidget(
    subject='Email address',
    confirm_subject='Confirm your Email address',
    )
# Double-entry widget for the optional preferred-email field.
pref_email_confirm_widget = deform.widget.CheckedInputWidget(
    subject='Optional Preferred Email',
    confirm_subject='Confirm your optional Email address',
    )
# Text input carrying the 'form-control' CSS class.
# NOTE(review): sn_widget is not referenced by the schema below — confirm it is used elsewhere.
sn_widget = widget.TextInputWidget(
    css_class='form-control')
class AddAccountSchema(colander.Schema):
    """Colander schema for the ESIF HPC account-request form.

    One SchemaNode per form field: policy acceptances, name and postal
    address, contact details, employer and citizenship information, and
    NREL HPC user-ID details.  Optional fields use ``missing=unicode('')``
    (Python 2 codebase); widgets are deform widgets.
    """
    # --- policy acceptance -------------------------------------------
    # couTimestamp
    cou = colander.SchemaNode(
        colander.Boolean(),
        title='Security and Acceptable Use Policy Acceptance',
        description='Terms and Conditions Agreement - Check this if '
        'you have read and agree to abide by the Center\'s '
        'Security and Acceptable Use Policies.',
        widget=widget.CheckboxWidget(),
        validator=cou_validator,
        oid='cou'
    )
    # storTimestamp
    stor = colander.SchemaNode(
        colander.Boolean(),
        title='Data Security Policy Acceptance',
        description='Check this if you have read and agree '
        'to the Center\'s storage policies.',
        widget=deform.widget.CheckboxWidget(),
        validator=stor_validator,
        oid='stor'
    )
    # cybeTimestamp
    # cyber = colander.SchemaNode(
    #     colander.Boolean(),
    #     title='Cyber Security Policy Acceptance',
    #     description='Check this if you have read and agree to abide by '
    #                 'the Center\'s Cyber Security policies.',
    #     widget=deform.widget.CheckboxWidget(),
    #     validator=cyber_validator,
    #     oid='cyber'
    # )
    # titlePrefix = colander.SchemaNode(
    #     colander.String(),
    #     title='Honorary',
    #     description='If you prefer to use n honorary, enter it here.',
    #     # validator=colander.ContainsOnly([x[0] for x in title_prefixes]),
    #     #validator=colander.Length(min=1, max=64),
    #     widget=widget.TextInputWidget(placeholder="Dr., Mr., Ms., etc."),
    #     missing=unicode(''),
    #     oid='titlePrefix'
    # )
    # --- name --------------------------------------------------------
    givenName = colander.SchemaNode(
        colander.String(),
        title='Given/First name',
        description='Your given or first name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(placeholder=''),
        oid='givenName'
    )
    middleName = colander.SchemaNode(
        colander.String(),
        title='Middle name/initial',
        description='Middle name or initial',
        validator=colander.Length(min=0, max=64),
        widget=widget.TextInputWidget(
            placeholder=''),
        missing=unicode(''),
        oid='middleName'
    )
    sn = colander.SchemaNode(
        colander.String(),
        title='Family/Last Name',
        description='family Name / Last Name',
        validator=colander.Length(min=1, max=64),
        widget=widget.TextInputWidget(
            placeholder=''),
        oid='sn'
    )
    suffix = colander.SchemaNode(
        colander.String(),
        title='Suffix',
        description='(Sr. Jr. IV, etc.)',
        validator=colander.Length(min=0, max=32),
        widget=widget.TextInputWidget(placeholder='example: III, PhD, etc.'),
        missing=unicode(''),
        oid='suffix'
    )
    cn = colander.SchemaNode(
        colander.String(),
        title='Common or Nick Name',
        description='Your full name. How you want to be addressed.',
        validator=colander.Length(min=3, max=64),
        widget=widget.TextInputWidget(
            placeholder='(Optional) How you want to be addressed '
            'if different from: FirstName LastName'),
        missing=unicode(''),
        oid='cn'
    )
    # --- postal address ----------------------------------------------
    street = colander.SchemaNode(
        colander.String(),
        title='Street Address',
        description='',
        validator=colander.Length(min=0, max=200),
        widget=widget.TextInputWidget(
            placeholder='business/institution address'),
        oid='street'
    )
    lcity = colander.SchemaNode(
        colander.String(),
        title='City',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='lcity'
    )
    # NOTE(review): oid is 'l', not 'st' — confirm the mismatch is intended.
    st = colander.SchemaNode(
        colander.String(),
        title='State/Province',
        description='',
        validator=colander.Length(min=1, max=128),
        widget=widget.TextInputWidget(),
        oid='l'
    )
    postalCode = colander.SchemaNode(
        colander.String(),
        title='Post/ZIP Code',
        description='',
        validator=colander.Length(min=2, max=64),
        widget=widget.TextInputWidget(),
        oid='postalCode'
    )
    country = colander.SchemaNode(
        colander.String(),
        title='Country',
        description='',
        widget=widget.SelectWidget(values=country_codes),
        #validator=colander.OneOf([x[0] for x in country_codes]),
        validator=valid_country,
        oid='country'
    )
    # --- contact -----------------------------------------------------
    mail = colander.SchemaNode(
        colander.String(),
        title='EMail',
        description='Your primary email account',
        # validator=colander.Email(msg="Please provide your work Email address. This will be the primary account we use to contact you."),
        widget=email_confirm_widget,
        oid='mail'
    )
    # mailPreferred = colander.SchemaNode(
    #     colander.String(),
    #     title='Preferred EMail',
    #     description='optional preferred email account',
    #     missing=unicode(''),
    #     widget=pref_email_confirm_widget,
    #     oid='mail'
    # )
    phone = colander.SchemaNode(
        colander.String(),
        title='Phone number',
        description='Please provide your primary telephone number',
        validator=phone_validator,
        widget=widget.TextInputWidget(),
        oid='phone'
    )
    cell = colander.SchemaNode(
        colander.String(),
        title='Cell phone number',
        description='For contact and verification',
        validator=phone_validator,
        missing=unicode(''),
        widget=widget.TextInputWidget(
            placeholder='(Optional) example: +1-000-000-0000'),
        oid='cell'
    )
    # --- employment --------------------------------------------------
    employerType = colander.SchemaNode(
        colander.String(),
        validator=colander.OneOf([x[0] for x in employer_types]),
        widget=deform.widget.RadioChoiceWidget(values=employer_types),
        title='Employer Type',
        description='Select the employer type from the list below that '
        'is most appropriate to your request',
        oid="employerType"
    )
    employerName = colander.SchemaNode(
        colander.String(),
        title='Employer, Institution, or Sponsor Name',
        description='Please provide the name of your employer or '
        'the institution you represent',
        validator=colander.Length(min=3, max=128),
        widget=widget.TextInputWidget(placeholder='employer name here'),
        oid='employerName'
    )
    # --- citizenship -------------------------------------------------
    citizenStatus = colander.SchemaNode(
        colander.String(),
        title='Citizenship Status',
        description='Select one of the following options '
        'that best describes your U.S. citizenship status',
        validator=colander.OneOf([x[0] for x in citizen_types]),
        widget=widget.RadioChoiceWidget(values=citizen_types),
        oid='citizenStatus'
    )
    citizenships = colander.SchemaNode(
        colander.Set(),
        title='Citizenships',
        description='Please select your country or countries of citizenship',
        validator=valid_countries,
        widget=widget.Select2Widget(values=country_codes, multiple=True),
        oid='citizenships',
    )
    # birthCountry
    birthCountry = colander.SchemaNode(
        colander.String(),
        title='Country of birth',
        description='Please enter/select your country of birth',
        validator=valid_country,
        widget=widget.Select2Widget(values=country_codes),
        oid='birthCountry',
    )
    # --- NREL HPC account information --------------------------------
    isnreluser = colander.SchemaNode(
        colander.String(),
        title='Existing NREL Account?',
        description="Select the option that is most true for you.",
        widget=deform.widget.RadioChoiceWidget(values=has_account),
        missing=unicode(''),
        label='Existing or Previous ESIF HPC UserID',
        oid='isnreluser'
    )
    nrelUserID = colander.SchemaNode(
        colander.String(),
        title='Your Existing NREL HPC UserID',
        description='If you have --or previously had-- an NREL UserID, '
        'enter it here.',
        validator=colander.Length(min=1, max=16),
        widget=widget.TextInputWidget(placeholder='example: jsmythe'),
        missing=unicode(''),
        oid='nrelUserID'
    )
    # NOTE(review): oid 'comments' duplicates the oid of the comments
    # field below — confirm this is intended.
    justification = colander.SchemaNode(
        colander.String(),
        title='NREL HPC User Credential Information',
        widget=widget.TextAreaWidget(rows=6, columns=60),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description="If you don't have an account on NREL HPC systems, "
        "we need some additional information. Please provide "
        "the project handles or titles of the project allocations "
        "you are associated with. "
        "If you don't have an allocation, please tell us "
        "why you are requesting NREL HPC login credentials.",
        oid='comments'
    )
    preferredUID = colander.SchemaNode(
        colander.String(),
        title='*New* ESIF HPC UserID',
        description="Please provide your desired User ID here.<sup>1</sup>"
        "(3 to 16 characters, all lower case.)",
        validator=colander.Length(min=3, max=16),
        widget=widget.TextInputWidget(placeholder="example: jsmythe"),
        missing=unicode(''),
        oid='preferredUID'
    )
    comments = colander.SchemaNode(
        colander.String(),
        title='Additional Notes or Comments',
        widget=deform.widget.TextAreaWidget(rows=6, columns=60,
            placeholder='If you think we need any additional '
            'information to process or approve your request, '
            'please let us know (project name, PI, NREL contact, etc.).'),
        missing=unicode(''),
        validator=colander.Length(max=1000),
        description='If you think we need any additional '
        'information to process or approve your request, '
        'please let us know.',
        oid='comments'
    )
    # approvalStatus = colander.SchemaNode(
    #     colander.Integer(),
    #     title='Approval Status',
    #     description='The current status if the request review process',
    #     validator=deferred_review_status_validator,
    #     default=0,
    #     widget=widget.HiddenWidget(),
    #     missing=unicode(''),
    #     oid='approvalStatus'
    # )
|
flexible
|
{
"blob_id": "3a3400426b054b2fc3d060141a1f84e5db553e59",
"index": 3424,
"step-1": "<mask token>\n\n\n@colander.deferred\ndef deferred_country_widget(node, kw):\n country_codes_data = kw.get('country_codes_data', [])\n return widget.Select2Widget(values=country_codes_data)\n\n\n<mask token>\n\n\n@colander.deferred\ndef deferred_title_prefix_widget(node, kw):\n title_prefix_data = kw.get('title_prefix_data', [])\n return widget.Select2Widget(values=title_prefix_data)\n\n\n<mask token>\n\n\nclass AddAccountSchema(colander.Schema):\n \"\"\"\n\n \"\"\"\n cou = colander.SchemaNode(colander.Boolean(), title=\n 'Security and Acceptable Use Policy Acceptance', description=\n \"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies.\"\n , widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')\n stor = colander.SchemaNode(colander.Boolean(), title=\n 'Data Security Policy Acceptance', description=\n \"Check this if you have read and agree to the Center's storage policies.\"\n , widget=deform.widget.CheckboxWidget(), validator=stor_validator,\n oid='stor')\n givenName = colander.SchemaNode(colander.String(), title=\n 'Given/First name', description='Your given or first name',\n validator=colander.Length(min=1, max=64), widget=widget.\n TextInputWidget(placeholder=''), oid='givenName')\n middleName = colander.SchemaNode(colander.String(), title=\n 'Middle name/initial', description='Middle name or initial',\n validator=colander.Length(min=0, max=64), widget=widget.\n TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')\n sn = colander.SchemaNode(colander.String(), title='Family/Last Name',\n description='family Name / Last Name', validator=colander.Length(\n min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'\n )\n suffix = colander.SchemaNode(colander.String(), title='Suffix',\n description='(Sr. Jr. 
IV, etc.)', validator=colander.Length(min=0,\n max=32), widget=widget.TextInputWidget(placeholder=\n 'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')\n cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',\n description='Your full name. How you want to be addressed.',\n validator=colander.Length(min=3, max=64), widget=widget.\n TextInputWidget(placeholder=\n '(Optional) How you want to be addressed if different from: FirstName LastName'\n ), missing=unicode(''), oid='cn')\n street = colander.SchemaNode(colander.String(), title='Street Address',\n description='', validator=colander.Length(min=0, max=200), widget=\n widget.TextInputWidget(placeholder='business/institution address'),\n oid='street')\n lcity = colander.SchemaNode(colander.String(), title='City',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='lcity')\n st = colander.SchemaNode(colander.String(), title='State/Province',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='l')\n postalCode = colander.SchemaNode(colander.String(), title=\n 'Post/ZIP Code', description='', validator=colander.Length(min=2,\n max=64), widget=widget.TextInputWidget(), oid='postalCode')\n country = colander.SchemaNode(colander.String(), title='Country',\n description='', widget=widget.SelectWidget(values=country_codes),\n validator=valid_country, oid='country')\n mail = colander.SchemaNode(colander.String(), title='EMail',\n description='Your primary email account', widget=\n email_confirm_widget, oid='mail')\n phone = colander.SchemaNode(colander.String(), title='Phone number',\n description='Please provide your primary telephone number',\n validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'\n )\n cell = colander.SchemaNode(colander.String(), title='Cell phone number',\n description='For contact and verification', validator=\n phone_validator, missing=unicode(''), 
widget=widget.TextInputWidget\n (placeholder='(Optional) example: +1-000-000-0000'), oid='cell')\n employerType = colander.SchemaNode(colander.String(), validator=\n colander.OneOf([x[0] for x in employer_types]), widget=deform.\n widget.RadioChoiceWidget(values=employer_types), title=\n 'Employer Type', description=\n 'Select the employer type from the list below that is most appropriate to your request'\n , oid='employerType')\n employerName = colander.SchemaNode(colander.String(), title=\n 'Employer, Institution, or Sponsor Name', description=\n 'Please provide the name of your employer or the institution you represent'\n , validator=colander.Length(min=3, max=128), widget=widget.\n TextInputWidget(placeholder='employer name here'), oid='employerName')\n citizenStatus = colander.SchemaNode(colander.String(), title=\n 'Citizenship Status', description=\n 'Select one of the following options that best describes your U.S. citizenship status'\n , validator=colander.OneOf([x[0] for x in citizen_types]), widget=\n widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')\n citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',\n description=\n 'Please select your country or countries of citizenship', validator\n =valid_countries, widget=widget.Select2Widget(values=country_codes,\n multiple=True), oid='citizenships')\n birthCountry = colander.SchemaNode(colander.String(), title=\n 'Country of birth', description=\n 'Please enter/select your country of birth', validator=\n valid_country, widget=widget.Select2Widget(values=country_codes),\n oid='birthCountry')\n isnreluser = colander.SchemaNode(colander.String(), title=\n 'Existing NREL Account?', description=\n 'Select the option that is most true for you.', widget=deform.\n widget.RadioChoiceWidget(values=has_account), missing=unicode(''),\n label='Existing or Previous ESIF HPC UserID', oid='isnreluser')\n nrelUserID = colander.SchemaNode(colander.String(), title=\n 'Your Existing NREL HPC 
UserID', description=\n 'If you have --or previously had-- an NREL UserID, enter it here.',\n validator=colander.Length(min=1, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='nrelUserID')\n justification = colander.SchemaNode(colander.String(), title=\n 'NREL HPC User Credential Information', widget=widget.\n TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=\n colander.Length(max=1000), description=\n \"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials.\"\n , oid='comments')\n preferredUID = colander.SchemaNode(colander.String(), title=\n '*New* ESIF HPC UserID', description=\n 'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'\n , validator=colander.Length(min=3, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='preferredUID')\n comments = colander.SchemaNode(colander.String(), title=\n 'Additional Notes or Comments', widget=deform.widget.TextAreaWidget\n (rows=6, columns=60, placeholder=\n 'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'\n ), missing=unicode(''), validator=colander.Length(max=1000),\n description=\n 'If you think we need any additional information to process or approve your request, please let us know.'\n , oid='comments')\n",
"step-2": "<mask token>\n\n\n@colander.deferred\ndef deferred_country_widget(node, kw):\n country_codes_data = kw.get('country_codes_data', [])\n return widget.Select2Widget(values=country_codes_data)\n\n\n@colander.deferred\ndef deferred_state_widget(node, kw):\n us_states_data = kw.get('us_states_data', [])\n return widget.Select2Widget(values=us_states_data)\n\n\n@colander.deferred\ndef deferred_title_prefix_widget(node, kw):\n title_prefix_data = kw.get('title_prefix_data', [])\n return widget.Select2Widget(values=title_prefix_data)\n\n\n<mask token>\n\n\nclass AddAccountSchema(colander.Schema):\n \"\"\"\n\n \"\"\"\n cou = colander.SchemaNode(colander.Boolean(), title=\n 'Security and Acceptable Use Policy Acceptance', description=\n \"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies.\"\n , widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')\n stor = colander.SchemaNode(colander.Boolean(), title=\n 'Data Security Policy Acceptance', description=\n \"Check this if you have read and agree to the Center's storage policies.\"\n , widget=deform.widget.CheckboxWidget(), validator=stor_validator,\n oid='stor')\n givenName = colander.SchemaNode(colander.String(), title=\n 'Given/First name', description='Your given or first name',\n validator=colander.Length(min=1, max=64), widget=widget.\n TextInputWidget(placeholder=''), oid='givenName')\n middleName = colander.SchemaNode(colander.String(), title=\n 'Middle name/initial', description='Middle name or initial',\n validator=colander.Length(min=0, max=64), widget=widget.\n TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')\n sn = colander.SchemaNode(colander.String(), title='Family/Last Name',\n description='family Name / Last Name', validator=colander.Length(\n min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'\n )\n suffix = colander.SchemaNode(colander.String(), 
title='Suffix',\n description='(Sr. Jr. IV, etc.)', validator=colander.Length(min=0,\n max=32), widget=widget.TextInputWidget(placeholder=\n 'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')\n cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',\n description='Your full name. How you want to be addressed.',\n validator=colander.Length(min=3, max=64), widget=widget.\n TextInputWidget(placeholder=\n '(Optional) How you want to be addressed if different from: FirstName LastName'\n ), missing=unicode(''), oid='cn')\n street = colander.SchemaNode(colander.String(), title='Street Address',\n description='', validator=colander.Length(min=0, max=200), widget=\n widget.TextInputWidget(placeholder='business/institution address'),\n oid='street')\n lcity = colander.SchemaNode(colander.String(), title='City',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='lcity')\n st = colander.SchemaNode(colander.String(), title='State/Province',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='l')\n postalCode = colander.SchemaNode(colander.String(), title=\n 'Post/ZIP Code', description='', validator=colander.Length(min=2,\n max=64), widget=widget.TextInputWidget(), oid='postalCode')\n country = colander.SchemaNode(colander.String(), title='Country',\n description='', widget=widget.SelectWidget(values=country_codes),\n validator=valid_country, oid='country')\n mail = colander.SchemaNode(colander.String(), title='EMail',\n description='Your primary email account', widget=\n email_confirm_widget, oid='mail')\n phone = colander.SchemaNode(colander.String(), title='Phone number',\n description='Please provide your primary telephone number',\n validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'\n )\n cell = colander.SchemaNode(colander.String(), title='Cell phone number',\n description='For contact and verification', validator=\n 
phone_validator, missing=unicode(''), widget=widget.TextInputWidget\n (placeholder='(Optional) example: +1-000-000-0000'), oid='cell')\n employerType = colander.SchemaNode(colander.String(), validator=\n colander.OneOf([x[0] for x in employer_types]), widget=deform.\n widget.RadioChoiceWidget(values=employer_types), title=\n 'Employer Type', description=\n 'Select the employer type from the list below that is most appropriate to your request'\n , oid='employerType')\n employerName = colander.SchemaNode(colander.String(), title=\n 'Employer, Institution, or Sponsor Name', description=\n 'Please provide the name of your employer or the institution you represent'\n , validator=colander.Length(min=3, max=128), widget=widget.\n TextInputWidget(placeholder='employer name here'), oid='employerName')\n citizenStatus = colander.SchemaNode(colander.String(), title=\n 'Citizenship Status', description=\n 'Select one of the following options that best describes your U.S. citizenship status'\n , validator=colander.OneOf([x[0] for x in citizen_types]), widget=\n widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')\n citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',\n description=\n 'Please select your country or countries of citizenship', validator\n =valid_countries, widget=widget.Select2Widget(values=country_codes,\n multiple=True), oid='citizenships')\n birthCountry = colander.SchemaNode(colander.String(), title=\n 'Country of birth', description=\n 'Please enter/select your country of birth', validator=\n valid_country, widget=widget.Select2Widget(values=country_codes),\n oid='birthCountry')\n isnreluser = colander.SchemaNode(colander.String(), title=\n 'Existing NREL Account?', description=\n 'Select the option that is most true for you.', widget=deform.\n widget.RadioChoiceWidget(values=has_account), missing=unicode(''),\n label='Existing or Previous ESIF HPC UserID', oid='isnreluser')\n nrelUserID = 
colander.SchemaNode(colander.String(), title=\n 'Your Existing NREL HPC UserID', description=\n 'If you have --or previously had-- an NREL UserID, enter it here.',\n validator=colander.Length(min=1, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='nrelUserID')\n justification = colander.SchemaNode(colander.String(), title=\n 'NREL HPC User Credential Information', widget=widget.\n TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=\n colander.Length(max=1000), description=\n \"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials.\"\n , oid='comments')\n preferredUID = colander.SchemaNode(colander.String(), title=\n '*New* ESIF HPC UserID', description=\n 'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'\n , validator=colander.Length(min=3, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='preferredUID')\n comments = colander.SchemaNode(colander.String(), title=\n 'Additional Notes or Comments', widget=deform.widget.TextAreaWidget\n (rows=6, columns=60, placeholder=\n 'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'\n ), missing=unicode(''), validator=colander.Length(max=1000),\n description=\n 'If you think we need any additional information to process or approve your request, please let us know.'\n , oid='comments')\n",
"step-3": "<mask token>\n\n\n@colander.deferred\ndef deferred_country_widget(node, kw):\n country_codes_data = kw.get('country_codes_data', [])\n return widget.Select2Widget(values=country_codes_data)\n\n\n@colander.deferred\ndef deferred_state_widget(node, kw):\n us_states_data = kw.get('us_states_data', [])\n return widget.Select2Widget(values=us_states_data)\n\n\n@colander.deferred\ndef deferred_title_prefix_widget(node, kw):\n title_prefix_data = kw.get('title_prefix_data', [])\n return widget.Select2Widget(values=title_prefix_data)\n\n\nemail_confirm_widget = deform.widget.CheckedInputWidget(subject=\n 'Email address', confirm_subject='Confirm your Email address')\npref_email_confirm_widget = deform.widget.CheckedInputWidget(subject=\n 'Optional Preferred Email', confirm_subject=\n 'Confirm your optional Email address')\nsn_widget = widget.TextInputWidget(css_class='form-control')\n\n\nclass AddAccountSchema(colander.Schema):\n \"\"\"\n\n \"\"\"\n cou = colander.SchemaNode(colander.Boolean(), title=\n 'Security and Acceptable Use Policy Acceptance', description=\n \"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies.\"\n , widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')\n stor = colander.SchemaNode(colander.Boolean(), title=\n 'Data Security Policy Acceptance', description=\n \"Check this if you have read and agree to the Center's storage policies.\"\n , widget=deform.widget.CheckboxWidget(), validator=stor_validator,\n oid='stor')\n givenName = colander.SchemaNode(colander.String(), title=\n 'Given/First name', description='Your given or first name',\n validator=colander.Length(min=1, max=64), widget=widget.\n TextInputWidget(placeholder=''), oid='givenName')\n middleName = colander.SchemaNode(colander.String(), title=\n 'Middle name/initial', description='Middle name or initial',\n validator=colander.Length(min=0, max=64), widget=widget.\n 
TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')\n sn = colander.SchemaNode(colander.String(), title='Family/Last Name',\n description='family Name / Last Name', validator=colander.Length(\n min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'\n )\n suffix = colander.SchemaNode(colander.String(), title='Suffix',\n description='(Sr. Jr. IV, etc.)', validator=colander.Length(min=0,\n max=32), widget=widget.TextInputWidget(placeholder=\n 'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')\n cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',\n description='Your full name. How you want to be addressed.',\n validator=colander.Length(min=3, max=64), widget=widget.\n TextInputWidget(placeholder=\n '(Optional) How you want to be addressed if different from: FirstName LastName'\n ), missing=unicode(''), oid='cn')\n street = colander.SchemaNode(colander.String(), title='Street Address',\n description='', validator=colander.Length(min=0, max=200), widget=\n widget.TextInputWidget(placeholder='business/institution address'),\n oid='street')\n lcity = colander.SchemaNode(colander.String(), title='City',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='lcity')\n st = colander.SchemaNode(colander.String(), title='State/Province',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='l')\n postalCode = colander.SchemaNode(colander.String(), title=\n 'Post/ZIP Code', description='', validator=colander.Length(min=2,\n max=64), widget=widget.TextInputWidget(), oid='postalCode')\n country = colander.SchemaNode(colander.String(), title='Country',\n description='', widget=widget.SelectWidget(values=country_codes),\n validator=valid_country, oid='country')\n mail = colander.SchemaNode(colander.String(), title='EMail',\n description='Your primary email account', widget=\n email_confirm_widget, oid='mail')\n phone = 
colander.SchemaNode(colander.String(), title='Phone number',\n description='Please provide your primary telephone number',\n validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'\n )\n cell = colander.SchemaNode(colander.String(), title='Cell phone number',\n description='For contact and verification', validator=\n phone_validator, missing=unicode(''), widget=widget.TextInputWidget\n (placeholder='(Optional) example: +1-000-000-0000'), oid='cell')\n employerType = colander.SchemaNode(colander.String(), validator=\n colander.OneOf([x[0] for x in employer_types]), widget=deform.\n widget.RadioChoiceWidget(values=employer_types), title=\n 'Employer Type', description=\n 'Select the employer type from the list below that is most appropriate to your request'\n , oid='employerType')\n employerName = colander.SchemaNode(colander.String(), title=\n 'Employer, Institution, or Sponsor Name', description=\n 'Please provide the name of your employer or the institution you represent'\n , validator=colander.Length(min=3, max=128), widget=widget.\n TextInputWidget(placeholder='employer name here'), oid='employerName')\n citizenStatus = colander.SchemaNode(colander.String(), title=\n 'Citizenship Status', description=\n 'Select one of the following options that best describes your U.S. 
citizenship status'\n , validator=colander.OneOf([x[0] for x in citizen_types]), widget=\n widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')\n citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',\n description=\n 'Please select your country or countries of citizenship', validator\n =valid_countries, widget=widget.Select2Widget(values=country_codes,\n multiple=True), oid='citizenships')\n birthCountry = colander.SchemaNode(colander.String(), title=\n 'Country of birth', description=\n 'Please enter/select your country of birth', validator=\n valid_country, widget=widget.Select2Widget(values=country_codes),\n oid='birthCountry')\n isnreluser = colander.SchemaNode(colander.String(), title=\n 'Existing NREL Account?', description=\n 'Select the option that is most true for you.', widget=deform.\n widget.RadioChoiceWidget(values=has_account), missing=unicode(''),\n label='Existing or Previous ESIF HPC UserID', oid='isnreluser')\n nrelUserID = colander.SchemaNode(colander.String(), title=\n 'Your Existing NREL HPC UserID', description=\n 'If you have --or previously had-- an NREL UserID, enter it here.',\n validator=colander.Length(min=1, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='nrelUserID')\n justification = colander.SchemaNode(colander.String(), title=\n 'NREL HPC User Credential Information', widget=widget.\n TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=\n colander.Length(max=1000), description=\n \"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. 
If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials.\"\n , oid='comments')\n preferredUID = colander.SchemaNode(colander.String(), title=\n '*New* ESIF HPC UserID', description=\n 'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'\n , validator=colander.Length(min=3, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='preferredUID')\n comments = colander.SchemaNode(colander.String(), title=\n 'Additional Notes or Comments', widget=deform.widget.TextAreaWidget\n (rows=6, columns=60, placeholder=\n 'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'\n ), missing=unicode(''), validator=colander.Length(max=1000),\n description=\n 'If you think we need any additional information to process or approve your request, please let us know.'\n , oid='comments')\n",
"step-4": "import deform\nimport deform.widget\nfrom deform import widget\nimport colander\nfrom validators import cyber_validator, phone_validator, stor_validator, cou_validator, valid_country, valid_countries\nfrom .lists import title_prefixes, citizen_types, employer_types, country_codes, has_account\n\n\n@colander.deferred\ndef deferred_country_widget(node, kw):\n country_codes_data = kw.get('country_codes_data', [])\n return widget.Select2Widget(values=country_codes_data)\n\n\n@colander.deferred\ndef deferred_state_widget(node, kw):\n us_states_data = kw.get('us_states_data', [])\n return widget.Select2Widget(values=us_states_data)\n\n\n@colander.deferred\ndef deferred_title_prefix_widget(node, kw):\n title_prefix_data = kw.get('title_prefix_data', [])\n return widget.Select2Widget(values=title_prefix_data)\n\n\nemail_confirm_widget = deform.widget.CheckedInputWidget(subject=\n 'Email address', confirm_subject='Confirm your Email address')\npref_email_confirm_widget = deform.widget.CheckedInputWidget(subject=\n 'Optional Preferred Email', confirm_subject=\n 'Confirm your optional Email address')\nsn_widget = widget.TextInputWidget(css_class='form-control')\n\n\nclass AddAccountSchema(colander.Schema):\n \"\"\"\n\n \"\"\"\n cou = colander.SchemaNode(colander.Boolean(), title=\n 'Security and Acceptable Use Policy Acceptance', description=\n \"Terms and Conditions Agreement - Check this if you have read and agree to abide by the Center's Security and Acceptable Use Policies.\"\n , widget=widget.CheckboxWidget(), validator=cou_validator, oid='cou')\n stor = colander.SchemaNode(colander.Boolean(), title=\n 'Data Security Policy Acceptance', description=\n \"Check this if you have read and agree to the Center's storage policies.\"\n , widget=deform.widget.CheckboxWidget(), validator=stor_validator,\n oid='stor')\n givenName = colander.SchemaNode(colander.String(), title=\n 'Given/First name', description='Your given or first name',\n 
validator=colander.Length(min=1, max=64), widget=widget.\n TextInputWidget(placeholder=''), oid='givenName')\n middleName = colander.SchemaNode(colander.String(), title=\n 'Middle name/initial', description='Middle name or initial',\n validator=colander.Length(min=0, max=64), widget=widget.\n TextInputWidget(placeholder=''), missing=unicode(''), oid='middleName')\n sn = colander.SchemaNode(colander.String(), title='Family/Last Name',\n description='family Name / Last Name', validator=colander.Length(\n min=1, max=64), widget=widget.TextInputWidget(placeholder=''), oid='sn'\n )\n suffix = colander.SchemaNode(colander.String(), title='Suffix',\n description='(Sr. Jr. IV, etc.)', validator=colander.Length(min=0,\n max=32), widget=widget.TextInputWidget(placeholder=\n 'example: III, PhD, etc.'), missing=unicode(''), oid='suffix')\n cn = colander.SchemaNode(colander.String(), title='Common or Nick Name',\n description='Your full name. How you want to be addressed.',\n validator=colander.Length(min=3, max=64), widget=widget.\n TextInputWidget(placeholder=\n '(Optional) How you want to be addressed if different from: FirstName LastName'\n ), missing=unicode(''), oid='cn')\n street = colander.SchemaNode(colander.String(), title='Street Address',\n description='', validator=colander.Length(min=0, max=200), widget=\n widget.TextInputWidget(placeholder='business/institution address'),\n oid='street')\n lcity = colander.SchemaNode(colander.String(), title='City',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='lcity')\n st = colander.SchemaNode(colander.String(), title='State/Province',\n description='', validator=colander.Length(min=1, max=128), widget=\n widget.TextInputWidget(), oid='l')\n postalCode = colander.SchemaNode(colander.String(), title=\n 'Post/ZIP Code', description='', validator=colander.Length(min=2,\n max=64), widget=widget.TextInputWidget(), oid='postalCode')\n country = 
colander.SchemaNode(colander.String(), title='Country',\n description='', widget=widget.SelectWidget(values=country_codes),\n validator=valid_country, oid='country')\n mail = colander.SchemaNode(colander.String(), title='EMail',\n description='Your primary email account', widget=\n email_confirm_widget, oid='mail')\n phone = colander.SchemaNode(colander.String(), title='Phone number',\n description='Please provide your primary telephone number',\n validator=phone_validator, widget=widget.TextInputWidget(), oid='phone'\n )\n cell = colander.SchemaNode(colander.String(), title='Cell phone number',\n description='For contact and verification', validator=\n phone_validator, missing=unicode(''), widget=widget.TextInputWidget\n (placeholder='(Optional) example: +1-000-000-0000'), oid='cell')\n employerType = colander.SchemaNode(colander.String(), validator=\n colander.OneOf([x[0] for x in employer_types]), widget=deform.\n widget.RadioChoiceWidget(values=employer_types), title=\n 'Employer Type', description=\n 'Select the employer type from the list below that is most appropriate to your request'\n , oid='employerType')\n employerName = colander.SchemaNode(colander.String(), title=\n 'Employer, Institution, or Sponsor Name', description=\n 'Please provide the name of your employer or the institution you represent'\n , validator=colander.Length(min=3, max=128), widget=widget.\n TextInputWidget(placeholder='employer name here'), oid='employerName')\n citizenStatus = colander.SchemaNode(colander.String(), title=\n 'Citizenship Status', description=\n 'Select one of the following options that best describes your U.S. 
citizenship status'\n , validator=colander.OneOf([x[0] for x in citizen_types]), widget=\n widget.RadioChoiceWidget(values=citizen_types), oid='citizenStatus')\n citizenships = colander.SchemaNode(colander.Set(), title='Citizenships',\n description=\n 'Please select your country or countries of citizenship', validator\n =valid_countries, widget=widget.Select2Widget(values=country_codes,\n multiple=True), oid='citizenships')\n birthCountry = colander.SchemaNode(colander.String(), title=\n 'Country of birth', description=\n 'Please enter/select your country of birth', validator=\n valid_country, widget=widget.Select2Widget(values=country_codes),\n oid='birthCountry')\n isnreluser = colander.SchemaNode(colander.String(), title=\n 'Existing NREL Account?', description=\n 'Select the option that is most true for you.', widget=deform.\n widget.RadioChoiceWidget(values=has_account), missing=unicode(''),\n label='Existing or Previous ESIF HPC UserID', oid='isnreluser')\n nrelUserID = colander.SchemaNode(colander.String(), title=\n 'Your Existing NREL HPC UserID', description=\n 'If you have --or previously had-- an NREL UserID, enter it here.',\n validator=colander.Length(min=1, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='nrelUserID')\n justification = colander.SchemaNode(colander.String(), title=\n 'NREL HPC User Credential Information', widget=widget.\n TextAreaWidget(rows=6, columns=60), missing=unicode(''), validator=\n colander.Length(max=1000), description=\n \"If you don't have an account on NREL HPC systems, we need some additional information. Please provide the project handles or titles of the project allocations you are associated with. 
If you don't have an allocation, please tell us why you are requesting NREL HPC login credentials.\"\n , oid='comments')\n preferredUID = colander.SchemaNode(colander.String(), title=\n '*New* ESIF HPC UserID', description=\n 'Please provide your desired User ID here.<sup>1</sup>(3 to 16 characters, all lower case.)'\n , validator=colander.Length(min=3, max=16), widget=widget.\n TextInputWidget(placeholder='example: jsmythe'), missing=unicode(''\n ), oid='preferredUID')\n comments = colander.SchemaNode(colander.String(), title=\n 'Additional Notes or Comments', widget=deform.widget.TextAreaWidget\n (rows=6, columns=60, placeholder=\n 'If you think we need any additional information to process or approve your request, please let us know (project name, PI, NREL contact, etc.).'\n ), missing=unicode(''), validator=colander.Length(max=1000),\n description=\n 'If you think we need any additional information to process or approve your request, please let us know.'\n , oid='comments')\n",
"step-5": "import deform\nimport deform.widget\nfrom deform import (widget) # decorator, default_renderer, field, form,\nimport colander\n# import htmllaundry\n# from htmllaundry import sanitize\n\nfrom validators import (cyber_validator,\n phone_validator,\n stor_validator,\n cou_validator,\n valid_country,\n valid_countries)\n\nfrom .lists import (title_prefixes,\n citizen_types,\n employer_types,\n country_codes,\n has_account,\n )\n\n\n@colander.deferred\ndef deferred_country_widget(node, kw):\n country_codes_data = kw.get('country_codes_data', [])\n return widget.Select2Widget(values=country_codes_data)\n\n\n@colander.deferred\ndef deferred_state_widget(node, kw):\n us_states_data = kw.get('us_states_data', [])\n return widget.Select2Widget(values=us_states_data)\n\n\n@colander.deferred\ndef deferred_title_prefix_widget(node, kw):\n title_prefix_data = kw.get('title_prefix_data', [])\n return widget.Select2Widget(values=title_prefix_data)\n\n\nemail_confirm_widget = deform.widget.CheckedInputWidget(\n subject='Email address',\n confirm_subject='Confirm your Email address',\n )\n\npref_email_confirm_widget = deform.widget.CheckedInputWidget(\n subject='Optional Preferred Email',\n confirm_subject='Confirm your optional Email address',\n )\n\nsn_widget = widget.TextInputWidget(\n css_class='form-control')\n\n\nclass AddAccountSchema(colander.Schema):\n \"\"\"\n\n \"\"\"\n # couTimestamp\n cou = colander.SchemaNode(\n colander.Boolean(),\n title='Security and Acceptable Use Policy Acceptance',\n description='Terms and Conditions Agreement - Check this if '\n 'you have read and agree to abide by the Center\\'s '\n 'Security and Acceptable Use Policies.',\n widget=widget.CheckboxWidget(),\n validator=cou_validator,\n oid='cou'\n )\n\n # storTimestamp\n stor = colander.SchemaNode(\n colander.Boolean(),\n title='Data Security Policy Acceptance',\n description='Check this if you have read and agree '\n 'to the Center\\'s storage policies.',\n 
widget=deform.widget.CheckboxWidget(),\n validator=stor_validator,\n oid='stor'\n )\n # cybeTimestamp\n # cyber = colander.SchemaNode(\n # colander.Boolean(),\n # title='Cyber Security Policy Acceptance',\n # description='Check this if you have read and agree to abide by '\n # 'the Center\\'s Cyber Security policies.',\n # widget=deform.widget.CheckboxWidget(),\n # validator=cyber_validator,\n # oid='cyber'\n # )\n\n # titlePrefix = colander.SchemaNode(\n # colander.String(),\n # title='Honorary',\n # description='If you prefer to use n honorary, enter it here.',\n # # validator=colander.ContainsOnly([x[0] for x in title_prefixes]),\n # #validator=colander.Length(min=1, max=64),\n # widget=widget.TextInputWidget(placeholder=\"Dr., Mr., Ms., etc.\"),\n # missing=unicode(''),\n # oid='titlePrefix'\n # )\n\n givenName = colander.SchemaNode(\n colander.String(),\n title='Given/First name',\n description='Your given or first name',\n validator=colander.Length(min=1, max=64),\n widget=widget.TextInputWidget(placeholder=''),\n oid='givenName'\n )\n\n middleName = colander.SchemaNode(\n colander.String(),\n title='Middle name/initial',\n description='Middle name or initial',\n validator=colander.Length(min=0, max=64),\n widget=widget.TextInputWidget(\n placeholder=''),\n missing=unicode(''),\n oid='middleName'\n )\n\n sn = colander.SchemaNode(\n colander.String(),\n title='Family/Last Name',\n description='family Name / Last Name',\n validator=colander.Length(min=1, max=64),\n widget=widget.TextInputWidget(\n placeholder=''),\n oid='sn'\n )\n\n suffix = colander.SchemaNode(\n colander.String(),\n title='Suffix',\n description='(Sr. Jr. IV, etc.)',\n validator=colander.Length(min=0, max=32),\n widget=widget.TextInputWidget(placeholder='example: III, PhD, etc.'),\n missing=unicode(''),\n oid='suffix'\n )\n\n cn = colander.SchemaNode(\n colander.String(),\n title='Common or Nick Name',\n description='Your full name. 
How you want to be addressed.',\n validator=colander.Length(min=3, max=64),\n widget=widget.TextInputWidget(\n placeholder='(Optional) How you want to be addressed '\n 'if different from: FirstName LastName'),\n missing=unicode(''),\n oid='cn'\n )\n\n street = colander.SchemaNode(\n colander.String(),\n title='Street Address',\n description='',\n validator=colander.Length(min=0, max=200),\n widget=widget.TextInputWidget(\n placeholder='business/institution address'),\n oid='street'\n )\n\n lcity = colander.SchemaNode(\n colander.String(),\n title='City',\n description='',\n validator=colander.Length(min=1, max=128),\n widget=widget.TextInputWidget(),\n oid='lcity'\n )\n\n st = colander.SchemaNode(\n colander.String(),\n title='State/Province',\n description='',\n validator=colander.Length(min=1, max=128),\n widget=widget.TextInputWidget(),\n oid='l'\n )\n\n postalCode = colander.SchemaNode(\n colander.String(),\n title='Post/ZIP Code',\n description='',\n validator=colander.Length(min=2, max=64),\n widget=widget.TextInputWidget(),\n oid='postalCode'\n )\n\n country = colander.SchemaNode(\n colander.String(),\n title='Country',\n description='',\n widget=widget.SelectWidget(values=country_codes),\n #validator=colander.OneOf([x[0] for x in country_codes]),\n validator=valid_country,\n oid='country'\n )\n\n mail = colander.SchemaNode(\n colander.String(),\n title='EMail',\n description='Your primary email account',\n # validator=colander.Email(msg=\"Please provide your work Email address. 
This will be the primary account we use to contact you.\"),\n widget=email_confirm_widget,\n oid='mail'\n )\n\n # mailPreferred = colander.SchemaNode(\n # colander.String(),\n # title='Preferred EMail',\n # description='optional preferred email account',\n # missing=unicode(''),\n # widget=pref_email_confirm_widget,\n # oid='mail'\n # )\n\n phone = colander.SchemaNode(\n colander.String(),\n title='Phone number',\n description='Please provide your primary telephone number',\n validator=phone_validator,\n widget=widget.TextInputWidget(),\n oid='phone'\n )\n\n cell = colander.SchemaNode(\n colander.String(),\n title='Cell phone number',\n description='For contact and verification',\n validator=phone_validator,\n missing=unicode(''),\n widget=widget.TextInputWidget(\n placeholder='(Optional) example: +1-000-000-0000'),\n oid='cell'\n )\n\n employerType = colander.SchemaNode(\n colander.String(),\n validator=colander.OneOf([x[0] for x in employer_types]),\n widget=deform.widget.RadioChoiceWidget(values=employer_types),\n title='Employer Type',\n description='Select the employer type from the list below that '\n 'is most appropriate to your request',\n oid=\"employerType\"\n )\n\n employerName = colander.SchemaNode(\n colander.String(),\n title='Employer, Institution, or Sponsor Name',\n description='Please provide the name of your employer or '\n 'the institution you represent',\n validator=colander.Length(min=3, max=128),\n widget=widget.TextInputWidget(placeholder='employer name here'),\n oid='employerName'\n )\n\n citizenStatus = colander.SchemaNode(\n colander.String(),\n title='Citizenship Status',\n description='Select one of the following options '\n 'that best describes your U.S. 
citizenship status',\n validator=colander.OneOf([x[0] for x in citizen_types]),\n widget=widget.RadioChoiceWidget(values=citizen_types),\n oid='citizenStatus'\n )\n\n citizenships = colander.SchemaNode(\n colander.Set(),\n title='Citizenships',\n description='Please select your country or countries of citizenship',\n validator=valid_countries,\n widget=widget.Select2Widget(values=country_codes, multiple=True),\n oid='citizenships',\n )\n\n # birthCountry\n birthCountry = colander.SchemaNode(\n colander.String(),\n title='Country of birth',\n description='Please enter/select your country of birth',\n validator=valid_country,\n widget=widget.Select2Widget(values=country_codes),\n oid='birthCountry',\n )\n\n isnreluser = colander.SchemaNode(\n colander.String(),\n title='Existing NREL Account?',\n description=\"Select the option that is most true for you.\",\n widget=deform.widget.RadioChoiceWidget(values=has_account),\n missing=unicode(''),\n label='Existing or Previous ESIF HPC UserID',\n oid='isnreluser'\n )\n\n nrelUserID = colander.SchemaNode(\n colander.String(),\n title='Your Existing NREL HPC UserID',\n description='If you have --or previously had-- an NREL UserID, '\n 'enter it here.',\n validator=colander.Length(min=1, max=16),\n widget=widget.TextInputWidget(placeholder='example: jsmythe'),\n missing=unicode(''),\n oid='nrelUserID'\n )\n\n justification = colander.SchemaNode(\n colander.String(),\n title='NREL HPC User Credential Information',\n widget=widget.TextAreaWidget(rows=6, columns=60),\n missing=unicode(''),\n validator=colander.Length(max=1000),\n description=\"If you don't have an account on NREL HPC systems, \"\n \"we need some additional information. Please provide \"\n \"the project handles or titles of the project allocations \"\n \"you are associated with. 
\"\n \"If you don't have an allocation, please tell us \"\n \"why you are requesting NREL HPC login credentials.\",\n oid='comments'\n )\n\n preferredUID = colander.SchemaNode(\n colander.String(),\n title='*New* ESIF HPC UserID',\n description=\"Please provide your desired User ID here.<sup>1</sup>\"\n \"(3 to 16 characters, all lower case.)\",\n validator=colander.Length(min=3, max=16),\n widget=widget.TextInputWidget(placeholder=\"example: jsmythe\"),\n missing=unicode(''),\n oid='preferredUID'\n )\n\n comments = colander.SchemaNode(\n colander.String(),\n title='Additional Notes or Comments',\n widget=deform.widget.TextAreaWidget(rows=6, columns=60,\n placeholder='If you think we need any additional '\n 'information to process or approve your request, '\n 'please let us know (project name, PI, NREL contact, etc.).'),\n missing=unicode(''),\n validator=colander.Length(max=1000),\n description='If you think we need any additional '\n 'information to process or approve your request, '\n 'please let us know.',\n oid='comments'\n )\n\n # approvalStatus = colander.SchemaNode(\n # colander.Integer(),\n # title='Approval Status',\n # description='The current status if the request review process',\n # validator=deferred_review_status_validator,\n # default=0,\n # widget=widget.HiddenWidget(),\n # missing=unicode(''),\n # oid='approvalStatus'\n # )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
a=eval(input('enter a list: '))
n=len(a)
if (n%2==0):
for i in range(0,n//2):
a[i],a[n//2+i]=a[n//2+i],a[i]
print('after swap:',a)
else:
for i in range(0,n//2):
a[i],a[n//2+i+1]=a[n//2+i+1],a[i]
print('after swap:',a)
|
normal
|
{
"blob_id": "18435f43e2f52e3d2e9ff6411f8dd0510d2da54d",
"index": 656,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif n % 2 == 0:\n for i in range(0, n // 2):\n a[i], a[n // 2 + i] = a[n // 2 + i], a[i]\n print('after swap:', a)\nelse:\n for i in range(0, n // 2):\n a[i], a[n // 2 + i + 1] = a[n // 2 + i + 1], a[i]\n print('after swap:', a)\n",
"step-3": "a = eval(input('enter a list: '))\nn = len(a)\nif n % 2 == 0:\n for i in range(0, n // 2):\n a[i], a[n // 2 + i] = a[n // 2 + i], a[i]\n print('after swap:', a)\nelse:\n for i in range(0, n // 2):\n a[i], a[n // 2 + i + 1] = a[n // 2 + i + 1], a[i]\n print('after swap:', a)\n",
"step-4": "a=eval(input('enter a list: '))\r\nn=len(a)\r\nif (n%2==0):\r\n for i in range(0,n//2):\r\n a[i],a[n//2+i]=a[n//2+i],a[i]\r\n print('after swap:',a)\r\nelse:\r\n for i in range(0,n//2):\r\n a[i],a[n//2+i+1]=a[n//2+i+1],a[i]\r\n print('after swap:',a)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import hashlib
def createMD5(str):
# 创建md5对象
hl = hashlib.md5()
hl.update(str.encode(encoding='utf-8'))
return hl.hexdigest()
|
normal
|
{
"blob_id": "ea78f754ffff26bac1e53ed1e842fd79112b8ee7",
"index": 6811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef createMD5(str):\n hl = hashlib.md5()\n hl.update(str.encode(encoding='utf-8'))\n return hl.hexdigest()\n",
"step-3": "import hashlib\n\n\ndef createMD5(str):\n hl = hashlib.md5()\n hl.update(str.encode(encoding='utf-8'))\n return hl.hexdigest()\n",
"step-4": "import hashlib\ndef createMD5(str):\n # 创建md5对象\n hl = hashlib.md5()\n hl.update(str.encode(encoding='utf-8'))\n return hl.hexdigest()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- utf-8 -*-
from django.db import models
class FieldsTest(models.Model):
pub_date = models.DateTimeField()
mod_date = models.DateTimeField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)
class DTModel(models.Model):
name = models.CharField(max_length=32)
start_datetime = models.DateTimeField(null=True, blank=True)
end_datetime = models.DateTimeField(null=True, blank=True)
start_date = models.DateField(null=True, blank=True)
end_date = models.DateField(null=True, blank=True)
start_time = models.TimeField(null=True, blank=True)
end_time = models.TimeField(null=True, blank=True)
duration = models.DurationField(null=True, blank=True)
def __str__(self):
return 'DTModel({0})'.format(self.name)
|
normal
|
{
"blob_id": "d6cfe7132855d832d8fd1ea9ca9760bd22109a92",
"index": 1893,
"step-1": "<mask token>\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-2": "<mask token>\n\n\nclass Foo(models.Model):\n <mask token>\n <mask token>\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-3": "<mask token>\n\n\nclass BigS(models.Model):\n <mask token>\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-4": "<mask token>\n\n\nclass FieldsTest(models.Model):\n <mask token>\n <mask token>\n\n\nclass BigS(models.Model):\n s = models.SlugField(max_length=255)\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-5": "# -*- utf-8 -*-\n\nfrom django.db import models\n\n\nclass FieldsTest(models.Model):\n pub_date = models.DateTimeField()\n mod_date = models.DateTimeField()\n\n\nclass BigS(models.Model):\n s = models.SlugField(max_length=255)\n\n\nclass Foo(models.Model):\n a = models.CharField(max_length=10)\n d = models.DecimalField(max_digits=5, decimal_places=3)\n\n\nclass Bar(models.Model):\n b = models.CharField(max_length=10)\n a = models.ForeignKey(Foo, related_name='bars', on_delete=models.CASCADE)\n\n\nclass DTModel(models.Model):\n name = models.CharField(max_length=32)\n start_datetime = models.DateTimeField(null=True, blank=True)\n end_datetime = models.DateTimeField(null=True, blank=True)\n start_date = models.DateField(null=True, blank=True)\n end_date = models.DateField(null=True, blank=True)\n start_time = models.TimeField(null=True, blank=True)\n end_time = models.TimeField(null=True, blank=True)\n duration = models.DurationField(null=True, blank=True)\n\n def __str__(self):\n return 'DTModel({0})'.format(self.name)\n",
"step-ids": [
5,
6,
8,
10,
13
]
}
|
[
5,
6,
8,
10,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, n + 1):
print(i)
if i < n:
print('+')
a.append(i)
print('= {}'.format(sum(a)))
<|reserved_special_token_1|>
n = 7
a = []
for i in range(1, n + 1):
print(i)
if i < n:
print('+')
a.append(i)
print('= {}'.format(sum(a)))
<|reserved_special_token_1|>
n=7
a=[]
for i in range(1,n+1):
print(i)
if(i<n):
print("+")
a.append(i)
print("= {}".format(sum(a)))
|
flexible
|
{
"blob_id": "de9b85c250dea15ff9201054957ebc38017a8c35",
"index": 5435,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n print(i)\n if i < n:\n print('+')\n a.append(i)\nprint('= {}'.format(sum(a)))\n",
"step-3": "n = 7\na = []\nfor i in range(1, n + 1):\n print(i)\n if i < n:\n print('+')\n a.append(i)\nprint('= {}'.format(sum(a)))\n",
"step-4": "n=7\na=[]\nfor i in range(1,n+1):\n print(i)\n if(i<n):\n print(\"+\")\n a.append(i)\nprint(\"= {}\".format(sum(a)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import argparse
import redis
from Tkinter import *
import ttk
import json
import time
import thread
R = None
NAME = {}
PROBLEM_NAME = {}
CONTEST_ID = None
QUEUE_NAME = None
BACKUP_QUEUE_NAME = None
RUNID_FIELD = "runid"
SUBMIT_TIME_FIELD = "submit_time"
STATUS_FIELD = "status"
STATUS_FINISHED = "finished"
STATUS_WAIT = "wait"
def lower_bound(arr, key):
left = 0
right = len(arr) - 1
res = len(arr)
while left <= right:
mid = (left + right) >> 1
if arr[mid] >= key:
res = mid
right = mid - 1
else:
left = mid + 1
return res
def get_status_key(user_id, pid):
return "status_%d_%d" % (user_id, pid)
def get_name(user_id):
user_id = str(user_id)
if user_id in NAME:
return NAME[user_id]
else:
return "user: %s" % user_id
def get_problem_color(pid):
pid = str(pid)
if pid in PROBLEM_NAME:
return PROBLEM_NAME[pid]
else:
return str(pid)
class PrinterTkinter:
def __init__(self):
self.root = Tk()
self.root.title("气球发放")
self.runid_to_node = dict()
self.runid_to_uid = dict()
self.runid_to_pid = dict()
self.have_uid_pid = set()
self.unfinished_runid = []
self.frame_left_top = Frame(width=400, height=200)
self.frame_right_top = Frame(width=400, height=200)
self.frame_center = Frame(width=800, height=400)
self.frame_bottom = Frame(width=800, height=50)
# 定义左上方区域
self.left_top_title = Label(self.frame_left_top, text="发放状态:", font=('Arial', 25))
self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW, padx=50, pady=30)
self.var_finish = StringVar()
self.var_wait = StringVar()
self.left_top_frame = Frame(self.frame_left_top)
self.left_top_frame_left1 = Label(self.frame_left_top, text="已发放", font=('Arial', 20))
self.left_top_frame_left2 = Label(self.frame_left_top, textvariable=self.var_finish, font=('Arial', 15))
self.var_finish.set(0)
self.left_top_frame_right1 = Label(self.frame_left_top, text="未发放", font=('Arial', 20))
self.left_top_frame_right2 = Label(self.frame_left_top, textvariable=self.var_wait, font=('Arial', 15))
self.var_wait.set(0)
self.left_top_frame_left1.grid(row=1, column=0)
self.left_top_frame_left2.grid(row=1, column=1)
self.left_top_frame_right1.grid(row=2, column=0)
self.left_top_frame_right2.grid(row=2, column=1)
# 定义右上方区域
self.var_entry = StringVar()
self.right_top_title = Label(self.frame_right_top, text="切换状态(输入runid):", font=('Arial', 20))
self.right_top_entry = Entry(self.frame_right_top, textvariable=self.var_entry)
self.number = int
self.right_top_button = Button(self.frame_right_top, text="确定", command=self.button_switch, font=('Arial', 15))
self.right_top_title.grid(row=0, column=0)
self.right_top_entry.grid(row=1, column=0)
self.right_top_button.grid(row=2, column=0, padx=20, pady=20)
# 定义中心列表区域
self.tree = ttk.Treeview(self.frame_center, show="headings", height=18, columns=("a", "b", "c", "d", "e"))
self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL, command=self.tree.yview)
# 定义树形结构与滚动条
self.tree.configure(yscrollcommand=self.vbar.set)
# 表格的标题
self.tree.column("a", width=50, anchor="center")
self.tree.column("b", width=150, anchor="center")
self.tree.column("c", width=150, anchor="center")
self.tree.column("d", width=200, anchor="center")
self.tree.column("e", width=150, anchor="center")
self.tree.heading("a", text="Runid")
self.tree.heading("b", text="User")
self.tree.heading("c", text="Problem")
self.tree.heading("d", text="Time")
self.tree.heading("e", text="Status")
# 调用方法获取表格内容插入
self.get_tree()
self.tree.grid(row=0, column=0, sticky=NSEW)
self.vbar.grid(row=0, column=1, sticky=NS)
# 整体区域定位
self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)
self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)
self.frame_bottom.grid(row=2, column=0, columnspan=2)
self.frame_left_top.grid_propagate(0)
self.frame_right_top.grid_propagate(0)
self.frame_center.grid_propagate(0)
self.frame_bottom.grid_propagate(0)
thread.start_new_thread(self.listen, ())
self.root.mainloop()
# 表格内容插入
def get_tree(self):
bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)
for bak in bak_list:
bak = bak.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
self.have_uid_pid.add("%d_%d" % (uid, pid))
elif "%d_%d" % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
self.have_uid_pid.add("%d_%d" % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = "end"
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
def button_switch(self):
self.number = self.right_top_entry.get()
runid = int(self.right_top_entry.get())
if not (runid in self.runid_to_node):
return
self.tree.delete(self.runid_to_node[runid])
uid = self.runid_to_uid[runid]
pid = self.runid_to_pid[runid]
status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
if status_before == STATUS_WAIT:
status = STATUS_FINISHED
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)
else:
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)
if status == STATUS_FINISHED:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.pop(pos)
pos = "end"
else:
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
if status == STATUS_WAIT:
self.var_wait.set(int(self.var_wait.get()) + 1)
self.var_finish.set(int(self.var_finish.get()) - 1)
else:
self.var_wait.set(int(self.var_wait.get()) - 1)
self.var_finish.set(int(self.var_finish.get()) + 1)
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
self.runid_to_node[runid] = node
def listen(self):
while True:
msg = R.blpop(QUEUE_NAME, 0)[1]
R.rpush(BACKUP_QUEUE_NAME, msg)
bak = msg.split('_')
uid = int(bak[0])
pid = int(bak[1])
runid = int(bak[2])
self.runid_to_uid[runid] = uid
self.runid_to_pid[runid] = pid
if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)
status = STATUS_WAIT
R.hset(get_status_key(uid, pid), STATUS_FIELD, status)
submit_time = time.ctime()
R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)
self.have_uid_pid.add("%d_%d" % (uid, pid))
elif "%d_%d" % (uid, pid) in self.have_uid_pid:
continue
else:
status = R.hget(get_status_key(uid, pid), STATUS_FIELD)
submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)
self.have_uid_pid.add("%d_%d" % (uid, pid))
if status == STATUS_FINISHED:
self.var_finish.set(int(self.var_finish.get()) + 1)
pos = "end"
else:
self.var_wait.set(int(self.var_wait.get()) + 1)
pos = lower_bound(self.unfinished_runid, runid)
self.unfinished_runid.insert(pos, runid)
node = self.tree.insert("", str(pos),
values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))
self.runid_to_node[runid] = node
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')
parser.add_argument('--namefile', dest='namefile', required=True, type=str, help='such as namefile.json')
parser.add_argument('--problemfile', dest='problemfile', required=True, type=str, help='such as problemfile.json')
parser.add_argument('--redishost', dest='redishost', required=True, type=str, help='such as 127.0.0.1')
parser.add_argument('--redisport', dest='redisport', required=True, type=int, help='such as 6379')
parser.add_argument('--contestid', dest='contestid', required=True, type=int, help='such as 9')
args = parser.parse_args()
R = redis.Redis(host=args.redishost, port=args.redisport)
CONTEST_ID = args.contestid
with open(args.namefile) as f:
NAME = json.loads(f.read())
with open(args.problemfile) as f:
PROBLEM_NAME = json.loads(f.read())
QUEUE_NAME = "ballon_%d" % CONTEST_ID
BACKUP_QUEUE_NAME = "ballon_bak_%d" % CONTEST_ID
PrinterTkinter()
|
normal
|
{
"blob_id": "76e1f811d06af0e6e83ae989a236a5cd22c55e01",
"index": 2985,
"step-1": "<mask token>\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, 
columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n 
SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = 
uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\n<mask token>\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 
20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n 
R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n 
self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry 
= StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n 
self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if 
status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=\n str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True,\n type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True,\n type=str, help='such as 
127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True,\n type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True,\n type=int, help='such as 9')\n args = parser.parse_args()\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n QUEUE_NAME = 'ballon_%d' % CONTEST_ID\n BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID\n PrinterTkinter()\n",
"step-4": "import argparse\nimport redis\nfrom Tkinter import *\nimport ttk\nimport json\nimport time\nimport thread\nR = None\nNAME = {}\nPROBLEM_NAME = {}\nCONTEST_ID = None\nQUEUE_NAME = None\nBACKUP_QUEUE_NAME = None\nRUNID_FIELD = 'runid'\nSUBMIT_TIME_FIELD = 'submit_time'\nSTATUS_FIELD = 'status'\nSTATUS_FINISHED = 'finished'\nSTATUS_WAIT = 'wait'\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = left + right >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return 'status_%d_%d' % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return 'user: %s' % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n\n def __init__(self):\n self.root = Tk()\n self.root.title('气球发放')\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n self.left_top_title = Label(self.frame_left_top, text='发放状态:', font\n =('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW,\n padx=50, pady=30)\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text='已发放',\n font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable\n =self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = Label(self.frame_left_top, text='未发放',\n font=('Arial', 20))\n 
self.left_top_frame_right2 = Label(self.frame_left_top,\n textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n self.var_entry = StringVar()\n self.right_top_title = Label(self.frame_right_top, text=\n '切换状态(输入runid):', font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=\n self.var_entry)\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text='确定',\n command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n self.tree = ttk.Treeview(self.frame_center, show='headings', height\n =18, columns=('a', 'b', 'c', 'd', 'e'))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL,\n command=self.tree.yview)\n self.tree.configure(yscrollcommand=self.vbar.set)\n self.tree.column('a', width=50, anchor='center')\n self.tree.column('b', width=150, anchor='center')\n self.tree.column('c', width=150, anchor='center')\n self.tree.column('d', width=200, anchor='center')\n self.tree.column('e', width=150, anchor='center')\n self.tree.heading('a', text='Runid')\n self.tree.heading('b', text='User')\n self.tree.heading('c', text='Problem')\n self.tree.heading('d', text='Time')\n self.tree.heading('e', text='Status')\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n 
self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not runid in self.runid_to_node:\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, 
STATUS_WAIT)\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = 'end'\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(uid),\n get_problem_color(pid), submit_time, status))\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time\n )\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n elif '%d_%d' % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid),\n SUBMIT_TIME_FIELD)\n self.have_uid_pid.add('%d_%d' % (uid, pid))\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = 'end'\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert('', str(pos), values=(runid, get_name(\n uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=\n str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True,\n type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True,\n type=str, help='such as 127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True,\n type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True,\n type=int, help='such as 9')\n args = parser.parse_args()\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n QUEUE_NAME = 'ballon_%d' % CONTEST_ID\n BACKUP_QUEUE_NAME = 'ballon_bak_%d' % CONTEST_ID\n PrinterTkinter()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport redis\nfrom Tkinter import *\nimport ttk\nimport json\nimport time\nimport thread\n\nR = None\nNAME = {}\nPROBLEM_NAME = {}\nCONTEST_ID = None\n\nQUEUE_NAME = None\nBACKUP_QUEUE_NAME = None\nRUNID_FIELD = \"runid\"\nSUBMIT_TIME_FIELD = \"submit_time\"\nSTATUS_FIELD = \"status\"\nSTATUS_FINISHED = \"finished\"\nSTATUS_WAIT = \"wait\"\n\n\ndef lower_bound(arr, key):\n left = 0\n right = len(arr) - 1\n res = len(arr)\n while left <= right:\n mid = (left + right) >> 1\n if arr[mid] >= key:\n res = mid\n right = mid - 1\n else:\n left = mid + 1\n return res\n\n\ndef get_status_key(user_id, pid):\n return \"status_%d_%d\" % (user_id, pid)\n\n\ndef get_name(user_id):\n user_id = str(user_id)\n if user_id in NAME:\n return NAME[user_id]\n else:\n return \"user: %s\" % user_id\n\n\ndef get_problem_color(pid):\n pid = str(pid)\n if pid in PROBLEM_NAME:\n return PROBLEM_NAME[pid]\n else:\n return str(pid)\n\n\nclass PrinterTkinter:\n def __init__(self):\n self.root = Tk()\n self.root.title(\"气球发放\")\n\n self.runid_to_node = dict()\n self.runid_to_uid = dict()\n self.runid_to_pid = dict()\n self.have_uid_pid = set()\n self.unfinished_runid = []\n\n self.frame_left_top = Frame(width=400, height=200)\n self.frame_right_top = Frame(width=400, height=200)\n self.frame_center = Frame(width=800, height=400)\n self.frame_bottom = Frame(width=800, height=50)\n\n # 定义左上方区域\n self.left_top_title = Label(self.frame_left_top, text=\"发放状态:\", font=('Arial', 25))\n self.left_top_title.grid(row=0, column=0, columnspan=2, sticky=NSEW, padx=50, pady=30)\n\n self.var_finish = StringVar()\n self.var_wait = StringVar()\n\n self.left_top_frame = Frame(self.frame_left_top)\n self.left_top_frame_left1 = Label(self.frame_left_top, text=\"已发放\", font=('Arial', 20))\n self.left_top_frame_left2 = Label(self.frame_left_top, textvariable=self.var_finish, font=('Arial', 15))\n self.var_finish.set(0)\n self.left_top_frame_right1 = 
Label(self.frame_left_top, text=\"未发放\", font=('Arial', 20))\n self.left_top_frame_right2 = Label(self.frame_left_top, textvariable=self.var_wait, font=('Arial', 15))\n self.var_wait.set(0)\n self.left_top_frame_left1.grid(row=1, column=0)\n self.left_top_frame_left2.grid(row=1, column=1)\n self.left_top_frame_right1.grid(row=2, column=0)\n self.left_top_frame_right2.grid(row=2, column=1)\n\n # 定义右上方区域\n self.var_entry = StringVar()\n\n self.right_top_title = Label(self.frame_right_top, text=\"切换状态(输入runid):\", font=('Arial', 20))\n self.right_top_entry = Entry(self.frame_right_top, textvariable=self.var_entry)\n\n self.number = int\n self.right_top_button = Button(self.frame_right_top, text=\"确定\", command=self.button_switch, font=('Arial', 15))\n self.right_top_title.grid(row=0, column=0)\n self.right_top_entry.grid(row=1, column=0)\n self.right_top_button.grid(row=2, column=0, padx=20, pady=20)\n\n\n # 定义中心列表区域\n self.tree = ttk.Treeview(self.frame_center, show=\"headings\", height=18, columns=(\"a\", \"b\", \"c\", \"d\", \"e\"))\n self.vbar = ttk.Scrollbar(self.frame_center, orient=VERTICAL, command=self.tree.yview)\n # 定义树形结构与滚动条\n self.tree.configure(yscrollcommand=self.vbar.set)\n\n # 表格的标题\n self.tree.column(\"a\", width=50, anchor=\"center\")\n self.tree.column(\"b\", width=150, anchor=\"center\")\n self.tree.column(\"c\", width=150, anchor=\"center\")\n self.tree.column(\"d\", width=200, anchor=\"center\")\n self.tree.column(\"e\", width=150, anchor=\"center\")\n self.tree.heading(\"a\", text=\"Runid\")\n self.tree.heading(\"b\", text=\"User\")\n self.tree.heading(\"c\", text=\"Problem\")\n self.tree.heading(\"d\", text=\"Time\")\n self.tree.heading(\"e\", text=\"Status\")\n\n # 调用方法获取表格内容插入\n self.get_tree()\n self.tree.grid(row=0, column=0, sticky=NSEW)\n self.vbar.grid(row=0, column=1, sticky=NS)\n\n # 整体区域定位\n self.frame_left_top.grid(row=0, column=0, padx=2, pady=5)\n self.frame_right_top.grid(row=0, column=1, padx=30, pady=30)\n 
self.frame_center.grid(row=1, column=0, columnspan=2, padx=4, pady=5)\n self.frame_bottom.grid(row=2, column=0, columnspan=2)\n\n self.frame_left_top.grid_propagate(0)\n self.frame_right_top.grid_propagate(0)\n self.frame_center.grid_propagate(0)\n self.frame_bottom.grid_propagate(0)\n\n thread.start_new_thread(self.listen, ())\n self.root.mainloop()\n\n # 表格内容插入\n def get_tree(self):\n bak_list = R.lrange(BACKUP_QUEUE_NAME, 0, -1)\n for bak in bak_list:\n bak = bak.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n elif \"%d_%d\" % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = \"end\"\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n\n node = self.tree.insert(\"\", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n def button_switch(self):\n self.number = self.right_top_entry.get()\n runid = int(self.right_top_entry.get())\n if not (runid in self.runid_to_node):\n return\n self.tree.delete(self.runid_to_node[runid])\n uid = self.runid_to_uid[runid]\n pid = self.runid_to_pid[runid]\n status_before = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), 
SUBMIT_TIME_FIELD)\n if status_before == STATUS_WAIT:\n status = STATUS_FINISHED\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_FINISHED)\n else:\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, STATUS_WAIT)\n\n if status == STATUS_FINISHED:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.pop(pos)\n pos = \"end\"\n else:\n pos = lower_bound(self.unfinished_runid, runid)\n self.unfinished_runid.insert(pos, runid)\n node = self.tree.insert(\"\", str(pos), values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n\n if status == STATUS_WAIT:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n self.var_finish.set(int(self.var_finish.get()) - 1)\n else:\n self.var_wait.set(int(self.var_wait.get()) - 1)\n self.var_finish.set(int(self.var_finish.get()) + 1)\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n self.runid_to_node[runid] = node\n\n def listen(self):\n while True:\n msg = R.blpop(QUEUE_NAME, 0)[1]\n R.rpush(BACKUP_QUEUE_NAME, msg)\n bak = msg.split('_')\n uid = int(bak[0])\n pid = int(bak[1])\n runid = int(bak[2])\n self.runid_to_uid[runid] = uid\n self.runid_to_pid[runid] = pid\n if R.hget(get_status_key(uid, pid), RUNID_FIELD) == None:\n R.hset(get_status_key(uid, pid), RUNID_FIELD, runid)\n status = STATUS_WAIT\n R.hset(get_status_key(uid, pid), STATUS_FIELD, status)\n submit_time = time.ctime()\n R.hset(get_status_key(uid, pid), SUBMIT_TIME_FIELD, submit_time)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n elif \"%d_%d\" % (uid, pid) in self.have_uid_pid:\n continue\n else:\n status = R.hget(get_status_key(uid, pid), STATUS_FIELD)\n submit_time = R.hget(get_status_key(uid, pid), SUBMIT_TIME_FIELD)\n self.have_uid_pid.add(\"%d_%d\" % (uid, pid))\n\n if status == STATUS_FINISHED:\n self.var_finish.set(int(self.var_finish.get()) + 1)\n pos = \"end\"\n else:\n self.var_wait.set(int(self.var_wait.get()) + 1)\n pos = lower_bound(self.unfinished_runid, runid)\n 
self.unfinished_runid.insert(pos, runid)\n\n node = self.tree.insert(\"\", str(pos),\n values=(runid, get_name(uid), get_problem_color(pid), submit_time, status))\n self.runid_to_node[runid] = node\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='NENU-OJ Ballon')\n parser.add_argument('--namefile', dest='namefile', required=True, type=str, help='such as namefile.json')\n parser.add_argument('--problemfile', dest='problemfile', required=True, type=str, help='such as problemfile.json')\n parser.add_argument('--redishost', dest='redishost', required=True, type=str, help='such as 127.0.0.1')\n parser.add_argument('--redisport', dest='redisport', required=True, type=int, help='such as 6379')\n parser.add_argument('--contestid', dest='contestid', required=True, type=int, help='such as 9')\n args = parser.parse_args()\n\n R = redis.Redis(host=args.redishost, port=args.redisport)\n CONTEST_ID = args.contestid\n with open(args.namefile) as f:\n NAME = json.loads(f.read())\n with open(args.problemfile) as f:\n PROBLEM_NAME = json.loads(f.read())\n\n QUEUE_NAME = \"ballon_%d\" % CONTEST_ID\n BACKUP_QUEUE_NAME = \"ballon_bak_%d\" % CONTEST_ID\n\n PrinterTkinter()\n",
"step-ids": [
5,
8,
10,
12,
13
]
}
|
[
5,
8,
10,
12,
13
] |
from oscar.app import Shop
from apps.catalogue.app import application as catalogue_app
class BaseApplication(Shop):
catalogue_app = catalogue_app
application = BaseApplication()
|
normal
|
{
"blob_id": "c8bb6ead7e305f466e24b47811d6ed38c8cfec0a",
"index": 2691,
"step-1": "<mask token>\n\n\nclass BaseApplication(Shop):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n",
"step-4": "from oscar.app import Shop\nfrom apps.catalogue.app import application as catalogue_app\n\n\nclass BaseApplication(Shop):\n catalogue_app = catalogue_app\n\n\napplication = BaseApplication()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from typing import List, Tuple
test_string = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
with open('data/day8_input.txt', 'r') as fp:
my_string = fp.read()
class Node:
def __init__(self):
self.metadata = list()
self.children = list()
def checksum(self):
return sum([x for x in self.metadata])
def add_child(self, child):
self.children.append(child)
pass
def value(self):
if len(self.children) == 0:
return self.checksum()
else:
val = 0
for m in self.metadata:
if m > 0 and m <= len(self.children):
val += self.children[m-1].value()
return val
def parse_string(my_string : str) -> List[int]:
return [int(x) for x in my_string.split(" ")]
def parse_node(codes: List[int], idx : int) -> Tuple[Node, int]:
num_children = codes[idx]
num_metadata = codes[idx + 1]
node = Node()
j = idx + 2
for i in range(num_children):
child, j = parse_node(codes, j)
node.add_child(child)
meta = list()
for i in range(num_metadata):
meta.append(codes[j])
j += 1
node.metadata = meta
return (node, j)
codes = parse_string(my_string)
tree, _ = parse_node(codes, 0)
def checksum(node):
c = node.checksum()
for child in node.children:
c += checksum(child)
return c
print(checksum(tree))
print(tree.value())
|
normal
|
{
"blob_id": "3bea4413a41a9eecb5e3184d090b646e17892b5c",
"index": 5277,
"step-1": "<mask token>\n\n\nclass Node:\n\n def __init__(self):\n self.metadata = list()\n self.children = list()\n\n def checksum(self):\n return sum([x for x in self.metadata])\n\n def add_child(self, child):\n self.children.append(child)\n pass\n\n def value(self):\n if len(self.children) == 0:\n return self.checksum()\n else:\n val = 0\n for m in self.metadata:\n if m > 0 and m <= len(self.children):\n val += self.children[m - 1].value()\n return val\n\n\ndef parse_string(my_string: str) ->List[int]:\n return [int(x) for x in my_string.split(' ')]\n\n\ndef parse_node(codes: List[int], idx: int) ->Tuple[Node, int]:\n num_children = codes[idx]\n num_metadata = codes[idx + 1]\n node = Node()\n j = idx + 2\n for i in range(num_children):\n child, j = parse_node(codes, j)\n node.add_child(child)\n meta = list()\n for i in range(num_metadata):\n meta.append(codes[j])\n j += 1\n node.metadata = meta\n return node, j\n\n\n<mask token>\n\n\ndef checksum(node):\n c = node.checksum()\n for child in node.children:\n c += checksum(child)\n return c\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('data/day8_input.txt', 'r') as fp:\n my_string = fp.read()\n\n\nclass Node:\n\n def __init__(self):\n self.metadata = list()\n self.children = list()\n\n def checksum(self):\n return sum([x for x in self.metadata])\n\n def add_child(self, child):\n self.children.append(child)\n pass\n\n def value(self):\n if len(self.children) == 0:\n return self.checksum()\n else:\n val = 0\n for m in self.metadata:\n if m > 0 and m <= len(self.children):\n val += self.children[m - 1].value()\n return val\n\n\ndef parse_string(my_string: str) ->List[int]:\n return [int(x) for x in my_string.split(' ')]\n\n\ndef parse_node(codes: List[int], idx: int) ->Tuple[Node, int]:\n num_children = codes[idx]\n num_metadata = codes[idx + 1]\n node = Node()\n j = idx + 2\n for i in range(num_children):\n child, j = parse_node(codes, j)\n node.add_child(child)\n meta = list()\n for i in range(num_metadata):\n meta.append(codes[j])\n j += 1\n node.metadata = meta\n return node, j\n\n\n<mask token>\n\n\ndef checksum(node):\n c = node.checksum()\n for child in node.children:\n c += checksum(child)\n return c\n\n\nprint(checksum(tree))\nprint(tree.value())\n",
"step-3": "<mask token>\ntest_string = '2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2'\nwith open('data/day8_input.txt', 'r') as fp:\n my_string = fp.read()\n\n\nclass Node:\n\n def __init__(self):\n self.metadata = list()\n self.children = list()\n\n def checksum(self):\n return sum([x for x in self.metadata])\n\n def add_child(self, child):\n self.children.append(child)\n pass\n\n def value(self):\n if len(self.children) == 0:\n return self.checksum()\n else:\n val = 0\n for m in self.metadata:\n if m > 0 and m <= len(self.children):\n val += self.children[m - 1].value()\n return val\n\n\ndef parse_string(my_string: str) ->List[int]:\n return [int(x) for x in my_string.split(' ')]\n\n\ndef parse_node(codes: List[int], idx: int) ->Tuple[Node, int]:\n num_children = codes[idx]\n num_metadata = codes[idx + 1]\n node = Node()\n j = idx + 2\n for i in range(num_children):\n child, j = parse_node(codes, j)\n node.add_child(child)\n meta = list()\n for i in range(num_metadata):\n meta.append(codes[j])\n j += 1\n node.metadata = meta\n return node, j\n\n\ncodes = parse_string(my_string)\ntree, _ = parse_node(codes, 0)\n\n\ndef checksum(node):\n c = node.checksum()\n for child in node.children:\n c += checksum(child)\n return c\n\n\nprint(checksum(tree))\nprint(tree.value())\n",
"step-4": "from typing import List, Tuple\ntest_string = '2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2'\nwith open('data/day8_input.txt', 'r') as fp:\n my_string = fp.read()\n\n\nclass Node:\n\n def __init__(self):\n self.metadata = list()\n self.children = list()\n\n def checksum(self):\n return sum([x for x in self.metadata])\n\n def add_child(self, child):\n self.children.append(child)\n pass\n\n def value(self):\n if len(self.children) == 0:\n return self.checksum()\n else:\n val = 0\n for m in self.metadata:\n if m > 0 and m <= len(self.children):\n val += self.children[m - 1].value()\n return val\n\n\ndef parse_string(my_string: str) ->List[int]:\n return [int(x) for x in my_string.split(' ')]\n\n\ndef parse_node(codes: List[int], idx: int) ->Tuple[Node, int]:\n num_children = codes[idx]\n num_metadata = codes[idx + 1]\n node = Node()\n j = idx + 2\n for i in range(num_children):\n child, j = parse_node(codes, j)\n node.add_child(child)\n meta = list()\n for i in range(num_metadata):\n meta.append(codes[j])\n j += 1\n node.metadata = meta\n return node, j\n\n\ncodes = parse_string(my_string)\ntree, _ = parse_node(codes, 0)\n\n\ndef checksum(node):\n c = node.checksum()\n for child in node.children:\n c += checksum(child)\n return c\n\n\nprint(checksum(tree))\nprint(tree.value())\n",
"step-5": "from typing import List, Tuple\n\ntest_string = \"2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\"\nwith open('data/day8_input.txt', 'r') as fp:\n my_string = fp.read()\n\n\nclass Node:\n def __init__(self):\n self.metadata = list()\n self.children = list()\n\n def checksum(self):\n return sum([x for x in self.metadata])\n\n def add_child(self, child):\n self.children.append(child)\n pass\n\n def value(self):\n if len(self.children) == 0:\n return self.checksum()\n else:\n val = 0\n for m in self.metadata:\n if m > 0 and m <= len(self.children):\n val += self.children[m-1].value()\n return val\n\n\ndef parse_string(my_string : str) -> List[int]:\n return [int(x) for x in my_string.split(\" \")]\n\n\ndef parse_node(codes: List[int], idx : int) -> Tuple[Node, int]:\n num_children = codes[idx]\n num_metadata = codes[idx + 1]\n node = Node()\n\n j = idx + 2\n for i in range(num_children):\n child, j = parse_node(codes, j)\n node.add_child(child)\n\n meta = list()\n for i in range(num_metadata):\n meta.append(codes[j])\n j += 1\n node.metadata = meta\n return (node, j)\n\n\ncodes = parse_string(my_string)\ntree, _ = parse_node(codes, 0)\n\ndef checksum(node):\n c = node.checksum()\n for child in node.children:\n c += checksum(child)\n return c\n\n\nprint(checksum(tree))\nprint(tree.value())",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
class QStudyingsView(QObjectsView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QStudyingsView(QObjectsView):
<|reserved_special_token_0|>
def init_table(self):
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QStudyingsView(QObjectsView):
def __init__(self, parent):
QWidget.__init__(self, parent)
QObjectsView.__init__(self, parent)
self.set_presenter(StudyingsPresenter(view=self))
def init_table(self):
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
<|reserved_special_token_1|>
from PyQt5.QtWidgets import QHeaderView, QWidget
from presenters.studyings_presenter import StudyingsPresenter
from view.q_objects_view import QObjectsView
class QStudyingsView(QObjectsView):
def __init__(self, parent):
QWidget.__init__(self, parent)
QObjectsView.__init__(self, parent)
self.set_presenter(StudyingsPresenter(view=self))
def init_table(self):
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])
self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
|
flexible
|
{
"blob_id": "f7174bf4e7612921e730ac87141c85654a2f2411",
"index": 6194,
"step-1": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n <mask token>\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-3": "<mask token>\n\n\nclass QStudyingsView(QObjectsView):\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n QObjectsView.__init__(self, parent)\n self.set_presenter(StudyingsPresenter(view=self))\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-4": "from PyQt5.QtWidgets import QHeaderView, QWidget\nfrom presenters.studyings_presenter import StudyingsPresenter\nfrom view.q_objects_view import QObjectsView\n\n\nclass QStudyingsView(QObjectsView):\n\n def __init__(self, parent):\n QWidget.__init__(self, parent)\n QObjectsView.__init__(self, parent)\n self.set_presenter(StudyingsPresenter(view=self))\n\n def init_table(self):\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['Время начала', 'Число', 'Темы'])\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from flask import render_template
from database import db
from api import app
from models import create_models
# Create a URL route in application for "/"
@app.route('/')
def home():
return render_template('home.html')
# If in stand alone mode, run the application
if __name__ == '__main__':
db.connect()
create_models()
app.run(debug=True)
|
normal
|
{
"blob_id": "5a0a8205977e59ff59a5d334a487cf96eee514d2",
"index": 7211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n db.connect()\n create_models()\n app.run(debug=True)\n",
"step-4": "from flask import render_template\nfrom database import db\nfrom api import app\nfrom models import create_models\n\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n\nif __name__ == '__main__':\n db.connect()\n create_models()\n app.run(debug=True)\n",
"step-5": "from flask import render_template\nfrom database import db\nfrom api import app\nfrom models import create_models\n\n# Create a URL route in application for \"/\"\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n# If in stand alone mode, run the application\nif __name__ == '__main__':\n db.connect()\n create_models()\n app.run(debug=True)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context, loader
from django.db import transaction
from django.db.models import Q
from maximus.models import Mercenary, Team, TeamMember, Tournament, TournamentTeam, TournamentMatchup, Matchup, MatchupStatistics, MatchResult
def index(request):
model = Context({})
t = loader.get_template('index.html')
return HttpResponse(t.render(model))
def create_team(request):
def get():
heroes = Mercenary.objects.filter(type='HERO')
pawns = Mercenary.objects.filter(type='PAWN')
model = Context({ 'heroes': heroes, 'pawns': pawns, 'mercrange': range(1,7), 'teams': get_team_groups() })
t = loader.get_template('teams.html')
return HttpResponse(t.render(model))
def post():
team = Team()
class_c = request.POST['hero']
leader = Mercenary.objects.filter(type='HERO').filter(name=class_c)
team.leader = leader[0]
team.wins = 0
team.losses = 0
team.notes = ""
team.save()
for i in range(1,10):
who = request.POST['pawn%s' % i]
if who != '':
merc = Mercenary.objects.filter(type='PAWN').filter(name=who)
current = TeamMember()
current.team = team
current.merc = merc[0]
current.location = i
current.save()
return HttpResponseRedirect('/app/teams')
if request.method == "POST":
return post()
else:
return get()
def edit_team(request):
def get():
team_id = request.GET["team"]
team = Team.objects.get(id=team_id)
model = Context({ 'team': team })
t = loader.get_template('edit_team.html')
return HttpResponse(t.render(model))
def post():
new_notes = request.POST["notes"]
team_id = request.POST["team"]
team = Team.objects.get(id=team_id)
team.notes = new_notes
team.save()
return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)
if request.method == "POST":
return post()
else:
return get()
def create_tournament(request):
def get():
inprogress = Tournament.objects.filter(completed=False);
finished = Tournament.objects.filter(completed=True);
model = Context({ 'teams': get_team_groups(), "in_progress": inprogress, "finished": finished })
t = loader.get_template('tournament/create_tournament.html')
return HttpResponse(t.render(model))
@transaction.commit_on_success
def post():
tournament = Tournament()
tournament.completed = False
tournament.save()
for team_id in request.POST.getlist('participant'):
if team_id != "":
team = Team.objects.get(id=team_id)
tourney_team = TournamentTeam()
tourney_team.tournament = tournament
tourney_team.team = team
tourney_team.save()
return HttpResponseRedirect('/app/tournament/matchups?tournament=%s' % str(tournament.id))
if request.method == "POST":
return post()
else:
return get()
def view_tournament(request):
def get():
tourney = Tournament.objects.get(id=request.GET["tournament"])
pending_teams = []
teams = []
for team in tourney.tourney_team_set.all():
if team.matchup_index == None:
pending_teams.append(team.team)
else:
teams.append(team.team)
matches = [[i for i in range(0,4)],[i for i in range(0,2)],[0]]
for match in tourney.tourney_match_set.all():
matches[match.round][match.index] = match
model = Context({ "pending_teams": pending_teams, "teams": teams, "matches": matches, "tourney": tourney})
t = loader.get_template('tournament/view_tournament.html')
return HttpResponse(t.render(model))
@transaction.commit_on_success
def post():
tourney_id = request.GET["tournament"]
tourney = Tournament.objects.get(id=tourney_id)
versus = request.POST.getlist("versus")
teams = []
for team_id in versus:
if team_id != "":
teams.append(Team.objects.get(id=team_id))
existing_matches = TournamentMatchup.objects.filter(tournament=tourney)
match = Matchup()
match.team1 = teams[0]
match.team2 = teams[1]
match.save()
tourney_match = TournamentMatchup()
tourney_match.tournament = tourney
tourney_match.matchup = match
tourney_match.round = 0
tourney_match.index = existing_matches.count()
tourney_match.save()
tourney_teams = []
tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[0]).get())
tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[1]).get())
tourney_teams[0].matchup_index = tourney_match.index * 2
tourney_teams[1].matchup_index = tourney_match.index * 2 + 1
tourney_teams[0].save();
tourney_teams[1].save();
return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % tourney_id)
if request.method == "POST":
return post()
else:
return get()
def result_tournament(request):
@transaction.commit_on_success
def post():
tournament_match_id = request.GET['tournament_match_key']
match = TournamentMatchup.objects.get(id=tournament_match_id)
winner_id = int(request.POST['winner'])
matchup = match.matchup
result = MatchResult()
if winner_id == matchup.team1.id:
result.winner = matchup.team1
result.loser = matchup.team2
elif winner_id == matchup.team2.id:
result.winner = matchup.team2
result.loser = matchup.team1
else:
raise Exception("could not determine winner key: %s (%s, %s)" % (winner_id, matchup.team1.id, matchup.team2.id))
update_stats(result.winner, result.loser)
result.save()
next_round_indices = {0:0, 1:0, 2:1, 3:1}
next_round_index = next_round_indices[match.index]
next_round = match.round + 1
if match.round < 2:
# look in existing matches for this winner's opponent
existing = TournamentMatchup.objects.filter(tournament=match.tournament).filter(round=next_round).filter(index=next_round_index)
if existing.count() == 1:
next_match = existing[0]
next_matchup = next_match.matchup
next_matchup.team2 = result.winner
next_matchup.save()
elif existing.count() == 0:
next_match = TournamentMatchup()
next_matchup = Matchup()
next_matchup.team1 = result.winner
next_matchup.save()
next_match.tournament = match.tournament
next_match.round = next_round
next_match.index = next_round_index
next_match.matchup = next_matchup
next_match.save()
else:
tourney = match.tournament
tourney.completed = True
tourney.winner = result.winner
tourney.save()
match.matchup.delete()
match.matchup = None
match.result = result
match.save()
return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % match.tournament.id)
if request.method == "POST":
return post()
else:
return HttpResponseRedirect("/app/tournament/matchups?tournament=%s" % request.GET["tournament"])
def result_detail(request):
result_id = request.GET['match']
match = MatchResult.objects.get(id=result_id)
model = Context({ 'match': match })
t = loader.get_template('result_detail.html')
return HttpResponse(t.render(model))
def get_team_groups():
teams = Team.objects.all()
team_groups = { }
for team in teams:
if not team.leader in team_groups:
team_groups[team.leader] = []
team_groups[team.leader].append(team)
team_groups = [sorted(team_groups[k], lambda x,y: cmp(x.id, y.id)) for k in sorted(team_groups.keys(), lambda x,y: cmp(x.name, y.name))]
return team_groups
def update_stats(winner, loser):
existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id, loser.id]) & Q(team2__in=[winner.id, loser.id]))
stats = None
if existing.count() == 0:
newStats = MatchupStatistics()
newStats.team1 = winner
newStats.team2 = loser
newStats.team1_wins = 1
newStats.team2_wins = 0
winner.wins = winner.wins + 1
loser.losses = loser.losses + 1
newStats.save()
winner.save()
loser.save()
return (1, 0)
elif existing.count() == 1:
oldStats = existing.fetch(1)[0]
if oldStats.team1.id == winner.id:
oldStats.team1_wins = oldStats.team1_wins + 1
else:
oldStats.team2_wins = oldStats.team2_wins + 1
winner.wins = winner.wins + 1
loser.losses = loser.losses + 1
oldStats.save()
winner.save()
loser.save()
return (0, 1)
else:
logging.error("unexpected state: %s matchup statistics for the same team pair (expected 1)" % existing.count())
return (0, 0)
|
normal
|
{
"blob_id": "f66f82c5c2842fc4fcae2251d4a16a9850230041",
"index": 3547,
"step-1": "<mask token>\n\n\ndef edit_team(request):\n\n def get():\n team_id = request.GET['team']\n team = Team.objects.get(id=team_id)\n model = Context({'team': team})\n t = loader.get_template('edit_team.html')\n return HttpResponse(t.render(model))\n\n def post():\n new_notes = request.POST['notes']\n team_id = request.POST['team']\n team = Team.objects.get(id=team_id)\n team.notes = new_notes\n team.save()\n return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef create_tournament(request):\n\n def get():\n inprogress = Tournament.objects.filter(completed=False)\n finished = Tournament.objects.filter(completed=True)\n model = Context({'teams': get_team_groups(), 'in_progress':\n inprogress, 'finished': finished})\n t = loader.get_template('tournament/create_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tournament = Tournament()\n tournament.completed = False\n tournament.save()\n for team_id in request.POST.getlist('participant'):\n if team_id != '':\n team = Team.objects.get(id=team_id)\n tourney_team = TournamentTeam()\n tourney_team.tournament = tournament\n tourney_team.team = team\n tourney_team.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % str(tournament.id))\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef view_tournament(request):\n\n def get():\n tourney = Tournament.objects.get(id=request.GET['tournament'])\n pending_teams = []\n teams = []\n for team in tourney.tourney_team_set.all():\n if team.matchup_index == None:\n pending_teams.append(team.team)\n else:\n teams.append(team.team)\n matches = [[i for i in range(0, 4)], [i for i in range(0, 2)], [0]]\n for match in tourney.tourney_match_set.all():\n matches[match.round][match.index] = match\n model = Context({'pending_teams': pending_teams, 'teams': teams,\n 'matches': matches, 
'tourney': tourney})\n t = loader.get_template('tournament/view_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tourney_id = request.GET['tournament']\n tourney = Tournament.objects.get(id=tourney_id)\n versus = request.POST.getlist('versus')\n teams = []\n for team_id in versus:\n if team_id != '':\n teams.append(Team.objects.get(id=team_id))\n existing_matches = TournamentMatchup.objects.filter(tournament=tourney)\n match = Matchup()\n match.team1 = teams[0]\n match.team2 = teams[1]\n match.save()\n tourney_match = TournamentMatchup()\n tourney_match.tournament = tourney\n tourney_match.matchup = match\n tourney_match.round = 0\n tourney_match.index = existing_matches.count()\n tourney_match.save()\n tourney_teams = []\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[0]).get())\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[1]).get())\n tourney_teams[0].matchup_index = tourney_match.index * 2\n tourney_teams[1].matchup_index = tourney_match.index * 2 + 1\n tourney_teams[0].save()\n tourney_teams[1].save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % tourney_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef result_tournament(request):\n\n @transaction.commit_on_success\n def post():\n tournament_match_id = request.GET['tournament_match_key']\n match = TournamentMatchup.objects.get(id=tournament_match_id)\n winner_id = int(request.POST['winner'])\n matchup = match.matchup\n result = MatchResult()\n if winner_id == matchup.team1.id:\n result.winner = matchup.team1\n result.loser = matchup.team2\n elif winner_id == matchup.team2.id:\n result.winner = matchup.team2\n result.loser = matchup.team1\n else:\n raise Exception('could not determine winner key: %s (%s, %s)' %\n (winner_id, matchup.team1.id, matchup.team2.id))\n update_stats(result.winner, 
result.loser)\n result.save()\n next_round_indices = {(0): 0, (1): 0, (2): 1, (3): 1}\n next_round_index = next_round_indices[match.index]\n next_round = match.round + 1\n if match.round < 2:\n existing = TournamentMatchup.objects.filter(tournament=match.\n tournament).filter(round=next_round).filter(index=\n next_round_index)\n if existing.count() == 1:\n next_match = existing[0]\n next_matchup = next_match.matchup\n next_matchup.team2 = result.winner\n next_matchup.save()\n elif existing.count() == 0:\n next_match = TournamentMatchup()\n next_matchup = Matchup()\n next_matchup.team1 = result.winner\n next_matchup.save()\n next_match.tournament = match.tournament\n next_match.round = next_round\n next_match.index = next_round_index\n next_match.matchup = next_matchup\n next_match.save()\n else:\n tourney = match.tournament\n tourney.completed = True\n tourney.winner = result.winner\n tourney.save()\n match.matchup.delete()\n match.matchup = None\n match.result = result\n match.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % match.tournament.id)\n if request.method == 'POST':\n return post()\n else:\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % request.GET[\n 'tournament'])\n\n\n<mask token>\n\n\ndef get_team_groups():\n teams = Team.objects.all()\n team_groups = {}\n for team in teams:\n if not team.leader in team_groups:\n team_groups[team.leader] = []\n team_groups[team.leader].append(team)\n team_groups = [sorted(team_groups[k], lambda x, y: cmp(x.id, y.id)) for\n k in sorted(team_groups.keys(), lambda x, y: cmp(x.name, y.name))]\n return team_groups\n\n\ndef update_stats(winner, loser):\n existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id,\n loser.id]) & Q(team2__in=[winner.id, loser.id]))\n stats = None\n if existing.count() == 0:\n newStats = MatchupStatistics()\n newStats.team1 = winner\n newStats.team2 = loser\n newStats.team1_wins = 1\n newStats.team2_wins = 0\n winner.wins = 
winner.wins + 1\n loser.losses = loser.losses + 1\n newStats.save()\n winner.save()\n loser.save()\n return 1, 0\n elif existing.count() == 1:\n oldStats = existing.fetch(1)[0]\n if oldStats.team1.id == winner.id:\n oldStats.team1_wins = oldStats.team1_wins + 1\n else:\n oldStats.team2_wins = oldStats.team2_wins + 1\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n oldStats.save()\n winner.save()\n loser.save()\n return 0, 1\n else:\n logging.error(\n 'unexpected state: %s matchup statistics for the same team pair (expected 1)'\n % existing.count())\n return 0, 0\n",
"step-2": "<mask token>\n\n\ndef index(request):\n model = Context({})\n t = loader.get_template('index.html')\n return HttpResponse(t.render(model))\n\n\n<mask token>\n\n\ndef edit_team(request):\n\n def get():\n team_id = request.GET['team']\n team = Team.objects.get(id=team_id)\n model = Context({'team': team})\n t = loader.get_template('edit_team.html')\n return HttpResponse(t.render(model))\n\n def post():\n new_notes = request.POST['notes']\n team_id = request.POST['team']\n team = Team.objects.get(id=team_id)\n team.notes = new_notes\n team.save()\n return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef create_tournament(request):\n\n def get():\n inprogress = Tournament.objects.filter(completed=False)\n finished = Tournament.objects.filter(completed=True)\n model = Context({'teams': get_team_groups(), 'in_progress':\n inprogress, 'finished': finished})\n t = loader.get_template('tournament/create_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tournament = Tournament()\n tournament.completed = False\n tournament.save()\n for team_id in request.POST.getlist('participant'):\n if team_id != '':\n team = Team.objects.get(id=team_id)\n tourney_team = TournamentTeam()\n tourney_team.tournament = tournament\n tourney_team.team = team\n tourney_team.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % str(tournament.id))\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef view_tournament(request):\n\n def get():\n tourney = Tournament.objects.get(id=request.GET['tournament'])\n pending_teams = []\n teams = []\n for team in tourney.tourney_team_set.all():\n if team.matchup_index == None:\n pending_teams.append(team.team)\n else:\n teams.append(team.team)\n matches = [[i for i in range(0, 4)], [i for i in range(0, 2)], [0]]\n for match in 
tourney.tourney_match_set.all():\n matches[match.round][match.index] = match\n model = Context({'pending_teams': pending_teams, 'teams': teams,\n 'matches': matches, 'tourney': tourney})\n t = loader.get_template('tournament/view_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tourney_id = request.GET['tournament']\n tourney = Tournament.objects.get(id=tourney_id)\n versus = request.POST.getlist('versus')\n teams = []\n for team_id in versus:\n if team_id != '':\n teams.append(Team.objects.get(id=team_id))\n existing_matches = TournamentMatchup.objects.filter(tournament=tourney)\n match = Matchup()\n match.team1 = teams[0]\n match.team2 = teams[1]\n match.save()\n tourney_match = TournamentMatchup()\n tourney_match.tournament = tourney\n tourney_match.matchup = match\n tourney_match.round = 0\n tourney_match.index = existing_matches.count()\n tourney_match.save()\n tourney_teams = []\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[0]).get())\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[1]).get())\n tourney_teams[0].matchup_index = tourney_match.index * 2\n tourney_teams[1].matchup_index = tourney_match.index * 2 + 1\n tourney_teams[0].save()\n tourney_teams[1].save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % tourney_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef result_tournament(request):\n\n @transaction.commit_on_success\n def post():\n tournament_match_id = request.GET['tournament_match_key']\n match = TournamentMatchup.objects.get(id=tournament_match_id)\n winner_id = int(request.POST['winner'])\n matchup = match.matchup\n result = MatchResult()\n if winner_id == matchup.team1.id:\n result.winner = matchup.team1\n result.loser = matchup.team2\n elif winner_id == matchup.team2.id:\n result.winner = matchup.team2\n result.loser = 
matchup.team1\n else:\n raise Exception('could not determine winner key: %s (%s, %s)' %\n (winner_id, matchup.team1.id, matchup.team2.id))\n update_stats(result.winner, result.loser)\n result.save()\n next_round_indices = {(0): 0, (1): 0, (2): 1, (3): 1}\n next_round_index = next_round_indices[match.index]\n next_round = match.round + 1\n if match.round < 2:\n existing = TournamentMatchup.objects.filter(tournament=match.\n tournament).filter(round=next_round).filter(index=\n next_round_index)\n if existing.count() == 1:\n next_match = existing[0]\n next_matchup = next_match.matchup\n next_matchup.team2 = result.winner\n next_matchup.save()\n elif existing.count() == 0:\n next_match = TournamentMatchup()\n next_matchup = Matchup()\n next_matchup.team1 = result.winner\n next_matchup.save()\n next_match.tournament = match.tournament\n next_match.round = next_round\n next_match.index = next_round_index\n next_match.matchup = next_matchup\n next_match.save()\n else:\n tourney = match.tournament\n tourney.completed = True\n tourney.winner = result.winner\n tourney.save()\n match.matchup.delete()\n match.matchup = None\n match.result = result\n match.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % match.tournament.id)\n if request.method == 'POST':\n return post()\n else:\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % request.GET[\n 'tournament'])\n\n\n<mask token>\n\n\ndef get_team_groups():\n teams = Team.objects.all()\n team_groups = {}\n for team in teams:\n if not team.leader in team_groups:\n team_groups[team.leader] = []\n team_groups[team.leader].append(team)\n team_groups = [sorted(team_groups[k], lambda x, y: cmp(x.id, y.id)) for\n k in sorted(team_groups.keys(), lambda x, y: cmp(x.name, y.name))]\n return team_groups\n\n\ndef update_stats(winner, loser):\n existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id,\n loser.id]) & Q(team2__in=[winner.id, loser.id]))\n stats = None\n if 
existing.count() == 0:\n newStats = MatchupStatistics()\n newStats.team1 = winner\n newStats.team2 = loser\n newStats.team1_wins = 1\n newStats.team2_wins = 0\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n newStats.save()\n winner.save()\n loser.save()\n return 1, 0\n elif existing.count() == 1:\n oldStats = existing.fetch(1)[0]\n if oldStats.team1.id == winner.id:\n oldStats.team1_wins = oldStats.team1_wins + 1\n else:\n oldStats.team2_wins = oldStats.team2_wins + 1\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n oldStats.save()\n winner.save()\n loser.save()\n return 0, 1\n else:\n logging.error(\n 'unexpected state: %s matchup statistics for the same team pair (expected 1)'\n % existing.count())\n return 0, 0\n",
"step-3": "<mask token>\n\n\ndef index(request):\n model = Context({})\n t = loader.get_template('index.html')\n return HttpResponse(t.render(model))\n\n\ndef create_team(request):\n\n def get():\n heroes = Mercenary.objects.filter(type='HERO')\n pawns = Mercenary.objects.filter(type='PAWN')\n model = Context({'heroes': heroes, 'pawns': pawns, 'mercrange':\n range(1, 7), 'teams': get_team_groups()})\n t = loader.get_template('teams.html')\n return HttpResponse(t.render(model))\n\n def post():\n team = Team()\n class_c = request.POST['hero']\n leader = Mercenary.objects.filter(type='HERO').filter(name=class_c)\n team.leader = leader[0]\n team.wins = 0\n team.losses = 0\n team.notes = ''\n team.save()\n for i in range(1, 10):\n who = request.POST['pawn%s' % i]\n if who != '':\n merc = Mercenary.objects.filter(type='PAWN').filter(name=who)\n current = TeamMember()\n current.team = team\n current.merc = merc[0]\n current.location = i\n current.save()\n return HttpResponseRedirect('/app/teams')\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef edit_team(request):\n\n def get():\n team_id = request.GET['team']\n team = Team.objects.get(id=team_id)\n model = Context({'team': team})\n t = loader.get_template('edit_team.html')\n return HttpResponse(t.render(model))\n\n def post():\n new_notes = request.POST['notes']\n team_id = request.POST['team']\n team = Team.objects.get(id=team_id)\n team.notes = new_notes\n team.save()\n return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef create_tournament(request):\n\n def get():\n inprogress = Tournament.objects.filter(completed=False)\n finished = Tournament.objects.filter(completed=True)\n model = Context({'teams': get_team_groups(), 'in_progress':\n inprogress, 'finished': finished})\n t = loader.get_template('tournament/create_tournament.html')\n return HttpResponse(t.render(model))\n\n 
@transaction.commit_on_success\n def post():\n tournament = Tournament()\n tournament.completed = False\n tournament.save()\n for team_id in request.POST.getlist('participant'):\n if team_id != '':\n team = Team.objects.get(id=team_id)\n tourney_team = TournamentTeam()\n tourney_team.tournament = tournament\n tourney_team.team = team\n tourney_team.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % str(tournament.id))\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef view_tournament(request):\n\n def get():\n tourney = Tournament.objects.get(id=request.GET['tournament'])\n pending_teams = []\n teams = []\n for team in tourney.tourney_team_set.all():\n if team.matchup_index == None:\n pending_teams.append(team.team)\n else:\n teams.append(team.team)\n matches = [[i for i in range(0, 4)], [i for i in range(0, 2)], [0]]\n for match in tourney.tourney_match_set.all():\n matches[match.round][match.index] = match\n model = Context({'pending_teams': pending_teams, 'teams': teams,\n 'matches': matches, 'tourney': tourney})\n t = loader.get_template('tournament/view_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tourney_id = request.GET['tournament']\n tourney = Tournament.objects.get(id=tourney_id)\n versus = request.POST.getlist('versus')\n teams = []\n for team_id in versus:\n if team_id != '':\n teams.append(Team.objects.get(id=team_id))\n existing_matches = TournamentMatchup.objects.filter(tournament=tourney)\n match = Matchup()\n match.team1 = teams[0]\n match.team2 = teams[1]\n match.save()\n tourney_match = TournamentMatchup()\n tourney_match.tournament = tourney\n tourney_match.matchup = match\n tourney_match.round = 0\n tourney_match.index = existing_matches.count()\n tourney_match.save()\n tourney_teams = []\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[0]).get())\n 
tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[1]).get())\n tourney_teams[0].matchup_index = tourney_match.index * 2\n tourney_teams[1].matchup_index = tourney_match.index * 2 + 1\n tourney_teams[0].save()\n tourney_teams[1].save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % tourney_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef result_tournament(request):\n\n @transaction.commit_on_success\n def post():\n tournament_match_id = request.GET['tournament_match_key']\n match = TournamentMatchup.objects.get(id=tournament_match_id)\n winner_id = int(request.POST['winner'])\n matchup = match.matchup\n result = MatchResult()\n if winner_id == matchup.team1.id:\n result.winner = matchup.team1\n result.loser = matchup.team2\n elif winner_id == matchup.team2.id:\n result.winner = matchup.team2\n result.loser = matchup.team1\n else:\n raise Exception('could not determine winner key: %s (%s, %s)' %\n (winner_id, matchup.team1.id, matchup.team2.id))\n update_stats(result.winner, result.loser)\n result.save()\n next_round_indices = {(0): 0, (1): 0, (2): 1, (3): 1}\n next_round_index = next_round_indices[match.index]\n next_round = match.round + 1\n if match.round < 2:\n existing = TournamentMatchup.objects.filter(tournament=match.\n tournament).filter(round=next_round).filter(index=\n next_round_index)\n if existing.count() == 1:\n next_match = existing[0]\n next_matchup = next_match.matchup\n next_matchup.team2 = result.winner\n next_matchup.save()\n elif existing.count() == 0:\n next_match = TournamentMatchup()\n next_matchup = Matchup()\n next_matchup.team1 = result.winner\n next_matchup.save()\n next_match.tournament = match.tournament\n next_match.round = next_round\n next_match.index = next_round_index\n next_match.matchup = next_matchup\n next_match.save()\n else:\n tourney = match.tournament\n tourney.completed = True\n tourney.winner = result.winner\n 
tourney.save()\n match.matchup.delete()\n match.matchup = None\n match.result = result\n match.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % match.tournament.id)\n if request.method == 'POST':\n return post()\n else:\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % request.GET[\n 'tournament'])\n\n\ndef result_detail(request):\n result_id = request.GET['match']\n match = MatchResult.objects.get(id=result_id)\n model = Context({'match': match})\n t = loader.get_template('result_detail.html')\n return HttpResponse(t.render(model))\n\n\ndef get_team_groups():\n teams = Team.objects.all()\n team_groups = {}\n for team in teams:\n if not team.leader in team_groups:\n team_groups[team.leader] = []\n team_groups[team.leader].append(team)\n team_groups = [sorted(team_groups[k], lambda x, y: cmp(x.id, y.id)) for\n k in sorted(team_groups.keys(), lambda x, y: cmp(x.name, y.name))]\n return team_groups\n\n\ndef update_stats(winner, loser):\n existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id,\n loser.id]) & Q(team2__in=[winner.id, loser.id]))\n stats = None\n if existing.count() == 0:\n newStats = MatchupStatistics()\n newStats.team1 = winner\n newStats.team2 = loser\n newStats.team1_wins = 1\n newStats.team2_wins = 0\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n newStats.save()\n winner.save()\n loser.save()\n return 1, 0\n elif existing.count() == 1:\n oldStats = existing.fetch(1)[0]\n if oldStats.team1.id == winner.id:\n oldStats.team1_wins = oldStats.team1_wins + 1\n else:\n oldStats.team2_wins = oldStats.team2_wins + 1\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n oldStats.save()\n winner.save()\n loser.save()\n return 0, 1\n else:\n logging.error(\n 'unexpected state: %s matchup statistics for the same team pair (expected 1)'\n % existing.count())\n return 0, 0\n",
"step-4": "from django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import Context, loader\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom maximus.models import Mercenary, Team, TeamMember, Tournament, TournamentTeam, TournamentMatchup, Matchup, MatchupStatistics, MatchResult\n\n\ndef index(request):\n model = Context({})\n t = loader.get_template('index.html')\n return HttpResponse(t.render(model))\n\n\ndef create_team(request):\n\n def get():\n heroes = Mercenary.objects.filter(type='HERO')\n pawns = Mercenary.objects.filter(type='PAWN')\n model = Context({'heroes': heroes, 'pawns': pawns, 'mercrange':\n range(1, 7), 'teams': get_team_groups()})\n t = loader.get_template('teams.html')\n return HttpResponse(t.render(model))\n\n def post():\n team = Team()\n class_c = request.POST['hero']\n leader = Mercenary.objects.filter(type='HERO').filter(name=class_c)\n team.leader = leader[0]\n team.wins = 0\n team.losses = 0\n team.notes = ''\n team.save()\n for i in range(1, 10):\n who = request.POST['pawn%s' % i]\n if who != '':\n merc = Mercenary.objects.filter(type='PAWN').filter(name=who)\n current = TeamMember()\n current.team = team\n current.merc = merc[0]\n current.location = i\n current.save()\n return HttpResponseRedirect('/app/teams')\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef edit_team(request):\n\n def get():\n team_id = request.GET['team']\n team = Team.objects.get(id=team_id)\n model = Context({'team': team})\n t = loader.get_template('edit_team.html')\n return HttpResponse(t.render(model))\n\n def post():\n new_notes = request.POST['notes']\n team_id = request.POST['team']\n team = Team.objects.get(id=team_id)\n team.notes = new_notes\n team.save()\n return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef create_tournament(request):\n\n def get():\n inprogress = 
Tournament.objects.filter(completed=False)\n finished = Tournament.objects.filter(completed=True)\n model = Context({'teams': get_team_groups(), 'in_progress':\n inprogress, 'finished': finished})\n t = loader.get_template('tournament/create_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tournament = Tournament()\n tournament.completed = False\n tournament.save()\n for team_id in request.POST.getlist('participant'):\n if team_id != '':\n team = Team.objects.get(id=team_id)\n tourney_team = TournamentTeam()\n tourney_team.tournament = tournament\n tourney_team.team = team\n tourney_team.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % str(tournament.id))\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef view_tournament(request):\n\n def get():\n tourney = Tournament.objects.get(id=request.GET['tournament'])\n pending_teams = []\n teams = []\n for team in tourney.tourney_team_set.all():\n if team.matchup_index == None:\n pending_teams.append(team.team)\n else:\n teams.append(team.team)\n matches = [[i for i in range(0, 4)], [i for i in range(0, 2)], [0]]\n for match in tourney.tourney_match_set.all():\n matches[match.round][match.index] = match\n model = Context({'pending_teams': pending_teams, 'teams': teams,\n 'matches': matches, 'tourney': tourney})\n t = loader.get_template('tournament/view_tournament.html')\n return HttpResponse(t.render(model))\n\n @transaction.commit_on_success\n def post():\n tourney_id = request.GET['tournament']\n tourney = Tournament.objects.get(id=tourney_id)\n versus = request.POST.getlist('versus')\n teams = []\n for team_id in versus:\n if team_id != '':\n teams.append(Team.objects.get(id=team_id))\n existing_matches = TournamentMatchup.objects.filter(tournament=tourney)\n match = Matchup()\n match.team1 = teams[0]\n match.team2 = teams[1]\n match.save()\n tourney_match = TournamentMatchup()\n 
tourney_match.tournament = tourney\n tourney_match.matchup = match\n tourney_match.round = 0\n tourney_match.index = existing_matches.count()\n tourney_match.save()\n tourney_teams = []\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[0]).get())\n tourney_teams.append(TournamentTeam.objects.filter(tournament=\n tourney).filter(team=teams[1]).get())\n tourney_teams[0].matchup_index = tourney_match.index * 2\n tourney_teams[1].matchup_index = tourney_match.index * 2 + 1\n tourney_teams[0].save()\n tourney_teams[1].save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % tourney_id)\n if request.method == 'POST':\n return post()\n else:\n return get()\n\n\ndef result_tournament(request):\n\n @transaction.commit_on_success\n def post():\n tournament_match_id = request.GET['tournament_match_key']\n match = TournamentMatchup.objects.get(id=tournament_match_id)\n winner_id = int(request.POST['winner'])\n matchup = match.matchup\n result = MatchResult()\n if winner_id == matchup.team1.id:\n result.winner = matchup.team1\n result.loser = matchup.team2\n elif winner_id == matchup.team2.id:\n result.winner = matchup.team2\n result.loser = matchup.team1\n else:\n raise Exception('could not determine winner key: %s (%s, %s)' %\n (winner_id, matchup.team1.id, matchup.team2.id))\n update_stats(result.winner, result.loser)\n result.save()\n next_round_indices = {(0): 0, (1): 0, (2): 1, (3): 1}\n next_round_index = next_round_indices[match.index]\n next_round = match.round + 1\n if match.round < 2:\n existing = TournamentMatchup.objects.filter(tournament=match.\n tournament).filter(round=next_round).filter(index=\n next_round_index)\n if existing.count() == 1:\n next_match = existing[0]\n next_matchup = next_match.matchup\n next_matchup.team2 = result.winner\n next_matchup.save()\n elif existing.count() == 0:\n next_match = TournamentMatchup()\n next_matchup = Matchup()\n next_matchup.team1 = result.winner\n 
next_matchup.save()\n next_match.tournament = match.tournament\n next_match.round = next_round\n next_match.index = next_round_index\n next_match.matchup = next_matchup\n next_match.save()\n else:\n tourney = match.tournament\n tourney.completed = True\n tourney.winner = result.winner\n tourney.save()\n match.matchup.delete()\n match.matchup = None\n match.result = result\n match.save()\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % match.tournament.id)\n if request.method == 'POST':\n return post()\n else:\n return HttpResponseRedirect(\n '/app/tournament/matchups?tournament=%s' % request.GET[\n 'tournament'])\n\n\ndef result_detail(request):\n result_id = request.GET['match']\n match = MatchResult.objects.get(id=result_id)\n model = Context({'match': match})\n t = loader.get_template('result_detail.html')\n return HttpResponse(t.render(model))\n\n\ndef get_team_groups():\n teams = Team.objects.all()\n team_groups = {}\n for team in teams:\n if not team.leader in team_groups:\n team_groups[team.leader] = []\n team_groups[team.leader].append(team)\n team_groups = [sorted(team_groups[k], lambda x, y: cmp(x.id, y.id)) for\n k in sorted(team_groups.keys(), lambda x, y: cmp(x.name, y.name))]\n return team_groups\n\n\ndef update_stats(winner, loser):\n existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id,\n loser.id]) & Q(team2__in=[winner.id, loser.id]))\n stats = None\n if existing.count() == 0:\n newStats = MatchupStatistics()\n newStats.team1 = winner\n newStats.team2 = loser\n newStats.team1_wins = 1\n newStats.team2_wins = 0\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n newStats.save()\n winner.save()\n loser.save()\n return 1, 0\n elif existing.count() == 1:\n oldStats = existing.fetch(1)[0]\n if oldStats.team1.id == winner.id:\n oldStats.team1_wins = oldStats.team1_wins + 1\n else:\n oldStats.team2_wins = oldStats.team2_wins + 1\n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n 
oldStats.save()\n winner.save()\n loser.save()\n return 0, 1\n else:\n logging.error(\n 'unexpected state: %s matchup statistics for the same team pair (expected 1)'\n % existing.count())\n return 0, 0\n",
"step-5": "# Create your views here.\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import Context, loader\nfrom django.db import transaction\nfrom django.db.models import Q\n\nfrom maximus.models import Mercenary, Team, TeamMember, Tournament, TournamentTeam, TournamentMatchup, Matchup, MatchupStatistics, MatchResult\n\ndef index(request):\n model = Context({})\n t = loader.get_template('index.html')\n return HttpResponse(t.render(model))\n\ndef create_team(request): \n def get():\n heroes = Mercenary.objects.filter(type='HERO')\n pawns = Mercenary.objects.filter(type='PAWN')\n\n model = Context({ 'heroes': heroes, 'pawns': pawns, 'mercrange': range(1,7), 'teams': get_team_groups() })\n t = loader.get_template('teams.html')\n return HttpResponse(t.render(model))\n \n def post():\n team = Team()\n class_c = request.POST['hero']\n leader = Mercenary.objects.filter(type='HERO').filter(name=class_c)\n team.leader = leader[0]\n team.wins = 0\n team.losses = 0\n team.notes = \"\"\n team.save()\n for i in range(1,10):\n who = request.POST['pawn%s' % i]\n if who != '':\n merc = Mercenary.objects.filter(type='PAWN').filter(name=who)\n current = TeamMember()\n current.team = team\n current.merc = merc[0]\n current.location = i\n current.save()\n \n return HttpResponseRedirect('/app/teams')\n \n if request.method == \"POST\":\n return post()\n else:\n return get()\n \ndef edit_team(request):\n def get():\n team_id = request.GET[\"team\"]\n team = Team.objects.get(id=team_id)\n \n model = Context({ 'team': team })\n t = loader.get_template('edit_team.html')\n return HttpResponse(t.render(model))\n \n def post():\n new_notes = request.POST[\"notes\"]\n team_id = request.POST[\"team\"]\n \n team = Team.objects.get(id=team_id)\n team.notes = new_notes\n team.save()\n return HttpResponseRedirect('/app/teams/edit?team=%s' % team_id)\n \n if request.method == \"POST\":\n return post()\n else:\n return get() \n\ndef create_tournament(request):\n 
def get():\n inprogress = Tournament.objects.filter(completed=False);\n finished = Tournament.objects.filter(completed=True);\n model = Context({ 'teams': get_team_groups(), \"in_progress\": inprogress, \"finished\": finished })\n t = loader.get_template('tournament/create_tournament.html')\n return HttpResponse(t.render(model))\n \n @transaction.commit_on_success\n def post():\n tournament = Tournament()\n tournament.completed = False\n \n tournament.save()\n for team_id in request.POST.getlist('participant'):\n if team_id != \"\":\n team = Team.objects.get(id=team_id)\n tourney_team = TournamentTeam()\n tourney_team.tournament = tournament\n tourney_team.team = team\n tourney_team.save()\n \n return HttpResponseRedirect('/app/tournament/matchups?tournament=%s' % str(tournament.id))\n \n if request.method == \"POST\":\n return post()\n else:\n return get() \n\ndef view_tournament(request):\n def get():\n tourney = Tournament.objects.get(id=request.GET[\"tournament\"])\n pending_teams = []\n teams = []\n for team in tourney.tourney_team_set.all():\n if team.matchup_index == None:\n pending_teams.append(team.team)\n else:\n teams.append(team.team) \n matches = [[i for i in range(0,4)],[i for i in range(0,2)],[0]]\n for match in tourney.tourney_match_set.all():\n matches[match.round][match.index] = match\n \n model = Context({ \"pending_teams\": pending_teams, \"teams\": teams, \"matches\": matches, \"tourney\": tourney})\n \n t = loader.get_template('tournament/view_tournament.html')\n return HttpResponse(t.render(model))\n \n @transaction.commit_on_success\n def post():\n tourney_id = request.GET[\"tournament\"]\n tourney = Tournament.objects.get(id=tourney_id)\n versus = request.POST.getlist(\"versus\")\n teams = []\n for team_id in versus:\n if team_id != \"\":\n teams.append(Team.objects.get(id=team_id))\n \n existing_matches = TournamentMatchup.objects.filter(tournament=tourney)\n \n match = Matchup()\n match.team1 = teams[0]\n match.team2 = teams[1]\n 
match.save()\n \n tourney_match = TournamentMatchup()\n tourney_match.tournament = tourney\n tourney_match.matchup = match\n tourney_match.round = 0\n tourney_match.index = existing_matches.count()\n tourney_match.save()\n \n tourney_teams = []\n tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[0]).get())\n tourney_teams.append(TournamentTeam.objects.filter(tournament=tourney).filter(team=teams[1]).get())\n \n tourney_teams[0].matchup_index = tourney_match.index * 2\n tourney_teams[1].matchup_index = tourney_match.index * 2 + 1\n \n tourney_teams[0].save();\n tourney_teams[1].save();\n \n return HttpResponseRedirect(\"/app/tournament/matchups?tournament=%s\" % tourney_id)\n \n if request.method == \"POST\":\n return post()\n else:\n return get()\n\ndef result_tournament(request):\n @transaction.commit_on_success\n def post():\n tournament_match_id = request.GET['tournament_match_key']\n match = TournamentMatchup.objects.get(id=tournament_match_id)\n\n winner_id = int(request.POST['winner'])\n matchup = match.matchup\n result = MatchResult()\n if winner_id == matchup.team1.id:\n result.winner = matchup.team1\n result.loser = matchup.team2\n elif winner_id == matchup.team2.id:\n result.winner = matchup.team2\n result.loser = matchup.team1\n else:\n raise Exception(\"could not determine winner key: %s (%s, %s)\" % (winner_id, matchup.team1.id, matchup.team2.id))\n \n update_stats(result.winner, result.loser)\n result.save()\n \n next_round_indices = {0:0, 1:0, 2:1, 3:1}\n next_round_index = next_round_indices[match.index]\n next_round = match.round + 1\n if match.round < 2:\n # look in existing matches for this winner's opponent\n existing = TournamentMatchup.objects.filter(tournament=match.tournament).filter(round=next_round).filter(index=next_round_index)\n if existing.count() == 1:\n next_match = existing[0]\n next_matchup = next_match.matchup\n next_matchup.team2 = result.winner\n next_matchup.save()\n elif existing.count() 
== 0:\n next_match = TournamentMatchup()\n next_matchup = Matchup()\n next_matchup.team1 = result.winner\n next_matchup.save()\n \n next_match.tournament = match.tournament\n next_match.round = next_round\n next_match.index = next_round_index\n next_match.matchup = next_matchup\n next_match.save()\n else:\n tourney = match.tournament\n tourney.completed = True\n tourney.winner = result.winner\n tourney.save()\n \n match.matchup.delete()\n match.matchup = None\n match.result = result\n match.save()\n \n return HttpResponseRedirect(\"/app/tournament/matchups?tournament=%s\" % match.tournament.id)\n \n if request.method == \"POST\":\n return post()\n else:\n return HttpResponseRedirect(\"/app/tournament/matchups?tournament=%s\" % request.GET[\"tournament\"]) \n\ndef result_detail(request):\n result_id = request.GET['match']\n match = MatchResult.objects.get(id=result_id)\n\n model = Context({ 'match': match })\n \n t = loader.get_template('result_detail.html')\n return HttpResponse(t.render(model))\n \ndef get_team_groups():\n teams = Team.objects.all()\n team_groups = { }\n for team in teams:\n if not team.leader in team_groups:\n team_groups[team.leader] = []\n team_groups[team.leader].append(team)\n \n team_groups = [sorted(team_groups[k], lambda x,y: cmp(x.id, y.id)) for k in sorted(team_groups.keys(), lambda x,y: cmp(x.name, y.name))]\n return team_groups\n\ndef update_stats(winner, loser):\n existing = MatchupStatistics.objects.filter(Q(team1__in=[winner.id, loser.id]) & Q(team2__in=[winner.id, loser.id]))\n stats = None\n if existing.count() == 0:\n newStats = MatchupStatistics()\n newStats.team1 = winner\n newStats.team2 = loser\n newStats.team1_wins = 1\n newStats.team2_wins = 0\n \n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n \n newStats.save()\n winner.save()\n loser.save()\n return (1, 0)\n elif existing.count() == 1:\n oldStats = existing.fetch(1)[0]\n if oldStats.team1.id == winner.id:\n oldStats.team1_wins = oldStats.team1_wins + 
1\n else:\n oldStats.team2_wins = oldStats.team2_wins + 1\n \n winner.wins = winner.wins + 1\n loser.losses = loser.losses + 1\n oldStats.save()\n winner.save()\n loser.save()\n \n return (0, 1)\n else:\n logging.error(\"unexpected state: %s matchup statistics for the same team pair (expected 1)\" % existing.count())\n return (0, 0)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
import numpy as np
import matplotlib.pyplot as plt
conf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235,
11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,
182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223,
4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]
conf_arr = np.transpose(np.array(conf_arr))
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j) / float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')
width, height = conf_arr.shape
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=
'center', verticalalignment='center')
cb = fig.colorbar(res)
alphabet = '0123456789'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
|
normal
|
{
"blob_id": "923a2979df3c37583eec712880ad821541bd898b",
"index": 8735,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\n<mask token>\nplt.clf()\n<mask token>\nax.set_aspect(1)\n<mask token>\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\n<mask token>\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-3": "<mask token>\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.index, name='index'), path('login', Login.
as_view(), name='login'), path('logout', logout, name='logout'), path(
'cart/', views.cart, name='cart'), path('order/', views.order, name=
'order'), path('check-out', views.CheckOut, name='checkout'), path(
'track/', views.tracker, name='tracker'), path('search/', views.search,
name='search'), path('checkout/', views.check, name='checkout'), path(
'productview/', views.proview, name='see')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
from .views import index
from .views import Login, logout
from .views import CheckOut
urlpatterns = [path('', views.index, name='index'), path('login', Login.
as_view(), name='login'), path('logout', logout, name='logout'), path(
'cart/', views.cart, name='cart'), path('order/', views.order, name=
'order'), path('check-out', views.CheckOut, name='checkout'), path(
'track/', views.tracker, name='tracker'), path('search/', views.search,
name='search'), path('checkout/', views.check, name='checkout'), path(
'productview/', views.proview, name='see')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
from .views import index
from .views import Login , logout
from .views import CheckOut
urlpatterns = [
path("",views.index, name="index"),
path('login', Login.as_view(), name='login'),
path('logout', logout , name='logout'),
path("cart/",views.cart , name="cart"),
path("order/",views.order , name="order"),
path('check-out', views.CheckOut , name='checkout'),
path("track/",views.tracker, name="tracker"),
path("search/",views.search, name="search"),
path("checkout/",views.check, name="checkout"),
path("productview/",views.proview, name="see"),
]
|
flexible
|
{
"blob_id": "c8aa93a33a6513129b4980180c4eb8d5d5eb3b5b",
"index": 2592,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.index, name='index'), path('login', Login.\n as_view(), name='login'), path('logout', logout, name='logout'), path(\n 'cart/', views.cart, name='cart'), path('order/', views.order, name=\n 'order'), path('check-out', views.CheckOut, name='checkout'), path(\n 'track/', views.tracker, name='tracker'), path('search/', views.search,\n name='search'), path('checkout/', views.check, name='checkout'), path(\n 'productview/', views.proview, name='see')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom .views import index\nfrom .views import Login, logout\nfrom .views import CheckOut\nurlpatterns = [path('', views.index, name='index'), path('login', Login.\n as_view(), name='login'), path('logout', logout, name='logout'), path(\n 'cart/', views.cart, name='cart'), path('order/', views.order, name=\n 'order'), path('check-out', views.CheckOut, name='checkout'), path(\n 'track/', views.tracker, name='tracker'), path('search/', views.search,\n name='search'), path('checkout/', views.check, name='checkout'), path(\n 'productview/', views.proview, name='see')]\n",
"step-4": "\r\n\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\nfrom .views import index\r\nfrom .views import Login , logout\r\n\r\nfrom .views import CheckOut\r\n\r\n\r\n\r\nurlpatterns = [\r\n path(\"\",views.index, name=\"index\"),\r\n \r\n path('login', Login.as_view(), name='login'),\r\n path('logout', logout , name='logout'),\r\n path(\"cart/\",views.cart , name=\"cart\"),\r\n path(\"order/\",views.order , name=\"order\"),\r\n \r\n path('check-out', views.CheckOut , name='checkout'),\r\n path(\"track/\",views.tracker, name=\"tracker\"),\r\n path(\"search/\",views.search, name=\"search\"),\r\n path(\"checkout/\",views.check, name=\"checkout\"),\r\n path(\"productview/\",views.proview, name=\"see\"),\r\n]\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SchematicsPlugin(BasePlugin):
<|reserved_special_token_0|>
def __init__(self, schema_name_resolver=None):
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec = None
self.openapi_version = None
self.openapi = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.openapi = OpenAPIConverter(openapi_version=spec.
openapi_version, schema_name_resolver=self.schema_name_resolver,
spec=spec)
def resolve_parameters(self, parameters):
resolved = []
for parameter in parameters:
if isinstance(parameter, dict) and not isinstance(parameter.get
('schema', {}), dict):
schema_instance = resolve_schema_instance(parameter['schema'])
if 'in' in parameter:
del parameter['schema']
resolved += self.openapi.schema2parameters(schema_instance,
default_in=parameter.pop('in'), **parameter)
continue
self.resolve_schema(parameter)
resolved.append(parameter)
return resolved
def resolve_schema_in_request_body(self, request_body):
"""Function to resolve a schema in a requestBody object - modifies then
response dict to convert Marshmallow Schema object or class into dict
"""
content = request_body['content']
for content_type in content:
schema = content[content_type]['schema']
content[content_type]['schema'] = self.openapi.resolve_schema_dict(
schema)
def resolve_schema(self, data):
"""Function to resolve a schema in a parameter or response - modifies the
corresponding dict to convert Marshmallow Schema object or class into dict
:param APISpec spec: `APISpec` containing refs.
:param dict|str data: either a parameter or response dictionary that may
contain a schema, or a reference provided as string
"""
if not isinstance(data, dict):
return
if 'schema' in data:
data['schema'] = self.openapi.resolve_schema_dict(data['schema'])
if self.openapi_version.major >= 3:
if 'content' in data:
for content_type in data['content']:
schema = data['content'][content_type]['schema']
data['content'][content_type]['schema'
] = self.openapi.resolve_schema_dict(schema)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
return response
def operation_helper(self, operations, **kwargs):
for operation in operations.values():
if not isinstance(operation, dict):
continue
if 'parameters' in operation:
operation['parameters'] = self.resolve_parameters(operation
['parameters'])
if self.openapi_version.major >= 3:
if 'requestBody' in operation:
self.resolve_schema_in_request_body(operation[
'requestBody'])
for response in operation.get('responses', {}).values():
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
def warn_if_schema_already_in_spec(self, schema_key):
"""Method to warn the user if the schema has already been added to the
spec.
"""
if schema_key in self.openapi.refs:
warnings.warn(
'{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'
.format(schema_key[0]), UserWarning)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SchematicsPlugin(BasePlugin):
<|reserved_special_token_0|>
def __init__(self, schema_name_resolver=None):
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec = None
self.openapi_version = None
self.openapi = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.openapi = OpenAPIConverter(openapi_version=spec.
openapi_version, schema_name_resolver=self.schema_name_resolver,
spec=spec)
def resolve_parameters(self, parameters):
resolved = []
for parameter in parameters:
if isinstance(parameter, dict) and not isinstance(parameter.get
('schema', {}), dict):
schema_instance = resolve_schema_instance(parameter['schema'])
if 'in' in parameter:
del parameter['schema']
resolved += self.openapi.schema2parameters(schema_instance,
default_in=parameter.pop('in'), **parameter)
continue
self.resolve_schema(parameter)
resolved.append(parameter)
return resolved
def resolve_schema_in_request_body(self, request_body):
"""Function to resolve a schema in a requestBody object - modifies then
response dict to convert Marshmallow Schema object or class into dict
"""
content = request_body['content']
for content_type in content:
schema = content[content_type]['schema']
content[content_type]['schema'] = self.openapi.resolve_schema_dict(
schema)
def resolve_schema(self, data):
"""Function to resolve a schema in a parameter or response - modifies the
corresponding dict to convert Marshmallow Schema object or class into dict
:param APISpec spec: `APISpec` containing refs.
:param dict|str data: either a parameter or response dictionary that may
contain a schema, or a reference provided as string
"""
if not isinstance(data, dict):
return
if 'schema' in data:
data['schema'] = self.openapi.resolve_schema_dict(data['schema'])
if self.openapi_version.major >= 3:
if 'content' in data:
for content_type in data['content']:
schema = data['content'][content_type]['schema']
data['content'][content_type]['schema'
] = self.openapi.resolve_schema_dict(schema)
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
Examples: ::
@ma_plugin.map_to_openapi_type('string', 'uuid')
class MyCustomField(Integer):
# ...
@ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')
class MyCustomFieldThatsKindaLikeAnInteger(Integer):
# ...
"""
return self.openapi.map_to_openapi_type(*args)
def schema_helper(self, name, _, schema=None, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide OpenAPI
metadata.
:param type|Schema schema: A marshmallow Schema class or instance.
"""
if schema is None:
return None
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
self.warn_if_schema_already_in_spec(schema_key)
self.openapi.refs[schema_key] = name
json_schema = self.openapi.schema2jsonschema(schema_instance)
return json_schema
def parameter_helper(self, parameter, **kwargs):
"""Parameter component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in parameter definition.
:param dict parameter: parameter fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(parameter)
return parameter
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
return response
def operation_helper(self, operations, **kwargs):
for operation in operations.values():
if not isinstance(operation, dict):
continue
if 'parameters' in operation:
operation['parameters'] = self.resolve_parameters(operation
['parameters'])
if self.openapi_version.major >= 3:
if 'requestBody' in operation:
self.resolve_schema_in_request_body(operation[
'requestBody'])
for response in operation.get('responses', {}).values():
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
def warn_if_schema_already_in_spec(self, schema_key):
"""Method to warn the user if the schema has already been added to the
spec.
"""
if schema_key in self.openapi.refs:
warnings.warn(
'{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'
.format(schema_key[0]), UserWarning)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SchematicsPlugin(BasePlugin):
"""APISpec plugin handling schematics models
:param callable schema_name_resolver: Callable to generate the schema definition name.
Receives the `Schema` class and returns the name to be used in refs within
the generated spec. When working with circular referencing this function
must must not return `None` for schemas in a circular reference chain.
Example: ::
def schema_name_resolver(schema):
return schema.__name__
"""
def __init__(self, schema_name_resolver=None):
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec = None
self.openapi_version = None
self.openapi = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.openapi = OpenAPIConverter(openapi_version=spec.
openapi_version, schema_name_resolver=self.schema_name_resolver,
spec=spec)
def resolve_parameters(self, parameters):
resolved = []
for parameter in parameters:
if isinstance(parameter, dict) and not isinstance(parameter.get
('schema', {}), dict):
schema_instance = resolve_schema_instance(parameter['schema'])
if 'in' in parameter:
del parameter['schema']
resolved += self.openapi.schema2parameters(schema_instance,
default_in=parameter.pop('in'), **parameter)
continue
self.resolve_schema(parameter)
resolved.append(parameter)
return resolved
def resolve_schema_in_request_body(self, request_body):
"""Function to resolve a schema in a requestBody object - modifies then
response dict to convert Marshmallow Schema object or class into dict
"""
content = request_body['content']
for content_type in content:
schema = content[content_type]['schema']
content[content_type]['schema'] = self.openapi.resolve_schema_dict(
schema)
def resolve_schema(self, data):
"""Function to resolve a schema in a parameter or response - modifies the
corresponding dict to convert Marshmallow Schema object or class into dict
:param APISpec spec: `APISpec` containing refs.
:param dict|str data: either a parameter or response dictionary that may
contain a schema, or a reference provided as string
"""
if not isinstance(data, dict):
return
if 'schema' in data:
data['schema'] = self.openapi.resolve_schema_dict(data['schema'])
if self.openapi_version.major >= 3:
if 'content' in data:
for content_type in data['content']:
schema = data['content'][content_type]['schema']
data['content'][content_type]['schema'
] = self.openapi.resolve_schema_dict(schema)
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
Examples: ::
@ma_plugin.map_to_openapi_type('string', 'uuid')
class MyCustomField(Integer):
# ...
@ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')
class MyCustomFieldThatsKindaLikeAnInteger(Integer):
# ...
"""
return self.openapi.map_to_openapi_type(*args)
def schema_helper(self, name, _, schema=None, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide OpenAPI
metadata.
:param type|Schema schema: A marshmallow Schema class or instance.
"""
if schema is None:
return None
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
self.warn_if_schema_already_in_spec(schema_key)
self.openapi.refs[schema_key] = name
json_schema = self.openapi.schema2jsonschema(schema_instance)
return json_schema
def parameter_helper(self, parameter, **kwargs):
"""Parameter component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in parameter definition.
:param dict parameter: parameter fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(parameter)
return parameter
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
return response
def operation_helper(self, operations, **kwargs):
for operation in operations.values():
if not isinstance(operation, dict):
continue
if 'parameters' in operation:
operation['parameters'] = self.resolve_parameters(operation
['parameters'])
if self.openapi_version.major >= 3:
if 'requestBody' in operation:
self.resolve_schema_in_request_body(operation[
'requestBody'])
for response in operation.get('responses', {}).values():
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
def warn_if_schema_already_in_spec(self, schema_key):
"""Method to warn the user if the schema has already been added to the
spec.
"""
if schema_key in self.openapi.refs:
warnings.warn(
'{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'
.format(schema_key[0]), UserWarning)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def resolver(schema):
"""Default implementation of a schema name resolver function
"""
name = schema.__name__
if name.endswith('Schema'):
return name[:-6] or name
return name
class SchematicsPlugin(BasePlugin):
"""APISpec plugin handling schematics models
:param callable schema_name_resolver: Callable to generate the schema definition name.
Receives the `Schema` class and returns the name to be used in refs within
the generated spec. When working with circular referencing this function
must must not return `None` for schemas in a circular reference chain.
Example: ::
def schema_name_resolver(schema):
return schema.__name__
"""
def __init__(self, schema_name_resolver=None):
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec = None
self.openapi_version = None
self.openapi = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.openapi = OpenAPIConverter(openapi_version=spec.
openapi_version, schema_name_resolver=self.schema_name_resolver,
spec=spec)
def resolve_parameters(self, parameters):
resolved = []
for parameter in parameters:
if isinstance(parameter, dict) and not isinstance(parameter.get
('schema', {}), dict):
schema_instance = resolve_schema_instance(parameter['schema'])
if 'in' in parameter:
del parameter['schema']
resolved += self.openapi.schema2parameters(schema_instance,
default_in=parameter.pop('in'), **parameter)
continue
self.resolve_schema(parameter)
resolved.append(parameter)
return resolved
def resolve_schema_in_request_body(self, request_body):
"""Function to resolve a schema in a requestBody object - modifies then
response dict to convert Marshmallow Schema object or class into dict
"""
content = request_body['content']
for content_type in content:
schema = content[content_type]['schema']
content[content_type]['schema'] = self.openapi.resolve_schema_dict(
schema)
def resolve_schema(self, data):
"""Function to resolve a schema in a parameter or response - modifies the
corresponding dict to convert Marshmallow Schema object or class into dict
:param APISpec spec: `APISpec` containing refs.
:param dict|str data: either a parameter or response dictionary that may
contain a schema, or a reference provided as string
"""
if not isinstance(data, dict):
return
if 'schema' in data:
data['schema'] = self.openapi.resolve_schema_dict(data['schema'])
if self.openapi_version.major >= 3:
if 'content' in data:
for content_type in data['content']:
schema = data['content'][content_type]['schema']
data['content'][content_type]['schema'
] = self.openapi.resolve_schema_dict(schema)
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
Examples: ::
@ma_plugin.map_to_openapi_type('string', 'uuid')
class MyCustomField(Integer):
# ...
@ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')
class MyCustomFieldThatsKindaLikeAnInteger(Integer):
# ...
"""
return self.openapi.map_to_openapi_type(*args)
def schema_helper(self, name, _, schema=None, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide OpenAPI
metadata.
:param type|Schema schema: A marshmallow Schema class or instance.
"""
if schema is None:
return None
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
self.warn_if_schema_already_in_spec(schema_key)
self.openapi.refs[schema_key] = name
json_schema = self.openapi.schema2jsonschema(schema_instance)
return json_schema
def parameter_helper(self, parameter, **kwargs):
"""Parameter component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in parameter definition.
:param dict parameter: parameter fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(parameter)
return parameter
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
return response
def operation_helper(self, operations, **kwargs):
for operation in operations.values():
if not isinstance(operation, dict):
continue
if 'parameters' in operation:
operation['parameters'] = self.resolve_parameters(operation
['parameters'])
if self.openapi_version.major >= 3:
if 'requestBody' in operation:
self.resolve_schema_in_request_body(operation[
'requestBody'])
for response in operation.get('responses', {}).values():
self.resolve_schema(response)
if 'headers' in response:
for header in response['headers'].values():
self.resolve_schema(header)
def warn_if_schema_already_in_spec(self, schema_key):
"""Method to warn the user if the schema has already been added to the
spec.
"""
if schema_key in self.openapi.refs:
warnings.warn(
'{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'
.format(schema_key[0]), UserWarning)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
openapi.schematics
~~~~~~~~~~~~~~~~~~
Schematics plugin for apispec based on ext.MarshmallowPlugin
"""
import warnings
from apispec import BasePlugin
from .common import resolve_schema_instance, make_schema_key
from .openapi import OpenAPIConverter
def resolver(schema):
"""Default implementation of a schema name resolver function
"""
name = schema.__name__
if name.endswith("Schema"):
return name[:-6] or name
return name
class SchematicsPlugin(BasePlugin):
"""APISpec plugin handling schematics models
:param callable schema_name_resolver: Callable to generate the schema definition name.
Receives the `Schema` class and returns the name to be used in refs within
the generated spec. When working with circular referencing this function
must must not return `None` for schemas in a circular reference chain.
Example: ::
def schema_name_resolver(schema):
return schema.__name__
"""
def __init__(self, schema_name_resolver=None):
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec = None
self.openapi_version = None
self.openapi = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.openapi = OpenAPIConverter(
openapi_version=spec.openapi_version,
schema_name_resolver=self.schema_name_resolver,
spec=spec,
)
def resolve_parameters(self, parameters):
resolved = []
for parameter in parameters:
if isinstance(parameter, dict) and not isinstance(
parameter.get("schema", {}), dict
):
schema_instance = resolve_schema_instance(parameter["schema"])
if "in" in parameter:
del parameter["schema"]
resolved += self.openapi.schema2parameters(
schema_instance, default_in=parameter.pop("in"), **parameter
)
continue
self.resolve_schema(parameter)
resolved.append(parameter)
return resolved
def resolve_schema_in_request_body(self, request_body):
"""Function to resolve a schema in a requestBody object - modifies then
response dict to convert Marshmallow Schema object or class into dict
"""
content = request_body["content"]
for content_type in content:
schema = content[content_type]["schema"]
content[content_type]["schema"] = self.openapi.resolve_schema_dict(schema)
def resolve_schema(self, data):
"""Function to resolve a schema in a parameter or response - modifies the
corresponding dict to convert Marshmallow Schema object or class into dict
:param APISpec spec: `APISpec` containing refs.
:param dict|str data: either a parameter or response dictionary that may
contain a schema, or a reference provided as string
"""
if not isinstance(data, dict):
return
# OAS 2 component or OAS 3 header
if "schema" in data:
data["schema"] = self.openapi.resolve_schema_dict(data["schema"])
# OAS 3 component except header
if self.openapi_version.major >= 3:
if "content" in data:
for content_type in data["content"]:
schema = data["content"][content_type]["schema"]
data["content"][content_type][
"schema"
] = self.openapi.resolve_schema_dict(schema)
def map_to_openapi_type(self, *args):
"""Decorator to set mapping for custom fields.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
Examples: ::
@ma_plugin.map_to_openapi_type('string', 'uuid')
class MyCustomField(Integer):
# ...
@ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')
class MyCustomFieldThatsKindaLikeAnInteger(Integer):
# ...
"""
return self.openapi.map_to_openapi_type(*args)
def schema_helper(self, name, _, schema=None, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide OpenAPI
metadata.
:param type|Schema schema: A marshmallow Schema class or instance.
"""
if schema is None:
return None
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
self.warn_if_schema_already_in_spec(schema_key)
self.openapi.refs[schema_key] = name
json_schema = self.openapi.schema2jsonschema(schema_instance)
return json_schema
def parameter_helper(self, parameter, **kwargs):
"""Parameter component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in parameter definition.
:param dict parameter: parameter fields. May contain a marshmallow
Schema class or instance.
"""
# In OpenAPIv3, this only works when using the complex form using "content"
self.resolve_schema(parameter)
return parameter
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
self.resolve_schema(response)
if "headers" in response:
for header in response["headers"].values():
self.resolve_schema(header)
return response
def operation_helper(self, operations, **kwargs):
for operation in operations.values():
if not isinstance(operation, dict):
continue
if "parameters" in operation:
operation["parameters"] = self.resolve_parameters(
operation["parameters"]
)
if self.openapi_version.major >= 3:
if "requestBody" in operation:
self.resolve_schema_in_request_body(operation["requestBody"])
for response in operation.get("responses", {}).values():
self.resolve_schema(response)
if "headers" in response:
for header in response["headers"].values():
self.resolve_schema(header)
def warn_if_schema_already_in_spec(self, schema_key):
"""Method to warn the user if the schema has already been added to the
spec.
"""
if schema_key in self.openapi.refs:
warnings.warn(
"{} has already been added to the spec. Adding it twice may "
"cause references to not resolve properly.".format(schema_key[0]),
UserWarning,
)
|
flexible
|
{
"blob_id": "1c5655563d05498f016fb2d41a07331b9e8de5e8",
"index": 2019,
"step-1": "<mask token>\n\n\nclass SchematicsPlugin(BasePlugin):\n <mask token>\n\n def __init__(self, schema_name_resolver=None):\n super().__init__()\n self.schema_name_resolver = schema_name_resolver or resolver\n self.spec = None\n self.openapi_version = None\n self.openapi = None\n\n def init_spec(self, spec):\n super().init_spec(spec)\n self.spec = spec\n self.openapi_version = spec.openapi_version\n self.openapi = OpenAPIConverter(openapi_version=spec.\n openapi_version, schema_name_resolver=self.schema_name_resolver,\n spec=spec)\n\n def resolve_parameters(self, parameters):\n resolved = []\n for parameter in parameters:\n if isinstance(parameter, dict) and not isinstance(parameter.get\n ('schema', {}), dict):\n schema_instance = resolve_schema_instance(parameter['schema'])\n if 'in' in parameter:\n del parameter['schema']\n resolved += self.openapi.schema2parameters(schema_instance,\n default_in=parameter.pop('in'), **parameter)\n continue\n self.resolve_schema(parameter)\n resolved.append(parameter)\n return resolved\n\n def resolve_schema_in_request_body(self, request_body):\n \"\"\"Function to resolve a schema in a requestBody object - modifies then\n response dict to convert Marshmallow Schema object or class into dict\n \"\"\"\n content = request_body['content']\n for content_type in content:\n schema = content[content_type]['schema']\n content[content_type]['schema'] = self.openapi.resolve_schema_dict(\n schema)\n\n def resolve_schema(self, data):\n \"\"\"Function to resolve a schema in a parameter or response - modifies the\n corresponding dict to convert Marshmallow Schema object or class into dict\n\n :param APISpec spec: `APISpec` containing refs.\n :param dict|str data: either a parameter or response dictionary that may\n contain a schema, or a reference provided as string\n \"\"\"\n if not isinstance(data, dict):\n return\n if 'schema' in data:\n data['schema'] = self.openapi.resolve_schema_dict(data['schema'])\n if self.openapi_version.major 
>= 3:\n if 'content' in data:\n for content_type in data['content']:\n schema = data['content'][content_type]['schema']\n data['content'][content_type]['schema'\n ] = self.openapi.resolve_schema_dict(schema)\n <mask token>\n <mask token>\n <mask token>\n\n def response_helper(self, response, **kwargs):\n \"\"\"Response component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in response definition.\n\n :param dict parameter: response fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n return response\n\n def operation_helper(self, operations, **kwargs):\n for operation in operations.values():\n if not isinstance(operation, dict):\n continue\n if 'parameters' in operation:\n operation['parameters'] = self.resolve_parameters(operation\n ['parameters'])\n if self.openapi_version.major >= 3:\n if 'requestBody' in operation:\n self.resolve_schema_in_request_body(operation[\n 'requestBody'])\n for response in operation.get('responses', {}).values():\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n\n def warn_if_schema_already_in_spec(self, schema_key):\n \"\"\"Method to warn the user if the schema has already been added to the\n spec.\n \"\"\"\n if schema_key in self.openapi.refs:\n warnings.warn(\n '{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'\n .format(schema_key[0]), UserWarning)\n",
"step-2": "<mask token>\n\n\nclass SchematicsPlugin(BasePlugin):\n <mask token>\n\n def __init__(self, schema_name_resolver=None):\n super().__init__()\n self.schema_name_resolver = schema_name_resolver or resolver\n self.spec = None\n self.openapi_version = None\n self.openapi = None\n\n def init_spec(self, spec):\n super().init_spec(spec)\n self.spec = spec\n self.openapi_version = spec.openapi_version\n self.openapi = OpenAPIConverter(openapi_version=spec.\n openapi_version, schema_name_resolver=self.schema_name_resolver,\n spec=spec)\n\n def resolve_parameters(self, parameters):\n resolved = []\n for parameter in parameters:\n if isinstance(parameter, dict) and not isinstance(parameter.get\n ('schema', {}), dict):\n schema_instance = resolve_schema_instance(parameter['schema'])\n if 'in' in parameter:\n del parameter['schema']\n resolved += self.openapi.schema2parameters(schema_instance,\n default_in=parameter.pop('in'), **parameter)\n continue\n self.resolve_schema(parameter)\n resolved.append(parameter)\n return resolved\n\n def resolve_schema_in_request_body(self, request_body):\n \"\"\"Function to resolve a schema in a requestBody object - modifies then\n response dict to convert Marshmallow Schema object or class into dict\n \"\"\"\n content = request_body['content']\n for content_type in content:\n schema = content[content_type]['schema']\n content[content_type]['schema'] = self.openapi.resolve_schema_dict(\n schema)\n\n def resolve_schema(self, data):\n \"\"\"Function to resolve a schema in a parameter or response - modifies the\n corresponding dict to convert Marshmallow Schema object or class into dict\n\n :param APISpec spec: `APISpec` containing refs.\n :param dict|str data: either a parameter or response dictionary that may\n contain a schema, or a reference provided as string\n \"\"\"\n if not isinstance(data, dict):\n return\n if 'schema' in data:\n data['schema'] = self.openapi.resolve_schema_dict(data['schema'])\n if self.openapi_version.major 
>= 3:\n if 'content' in data:\n for content_type in data['content']:\n schema = data['content'][content_type]['schema']\n data['content'][content_type]['schema'\n ] = self.openapi.resolve_schema_dict(schema)\n\n def map_to_openapi_type(self, *args):\n \"\"\"Decorator to set mapping for custom fields.\n\n ``*args`` can be:\n\n - a pair of the form ``(type, format)``\n - a core marshmallow field type (in which case we reuse that type's mapping)\n\n Examples: ::\n\n @ma_plugin.map_to_openapi_type('string', 'uuid')\n class MyCustomField(Integer):\n # ...\n\n @ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')\n class MyCustomFieldThatsKindaLikeAnInteger(Integer):\n # ...\n \"\"\"\n return self.openapi.map_to_openapi_type(*args)\n\n def schema_helper(self, name, _, schema=None, **kwargs):\n \"\"\"Definition helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` to provide OpenAPI\n metadata.\n\n :param type|Schema schema: A marshmallow Schema class or instance.\n \"\"\"\n if schema is None:\n return None\n schema_instance = resolve_schema_instance(schema)\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n return json_schema\n\n def parameter_helper(self, parameter, **kwargs):\n \"\"\"Parameter component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in parameter definition.\n\n :param dict parameter: parameter fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(parameter)\n return parameter\n\n def response_helper(self, response, **kwargs):\n \"\"\"Response component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in response definition.\n\n :param dict parameter: response fields. 
May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n return response\n\n def operation_helper(self, operations, **kwargs):\n for operation in operations.values():\n if not isinstance(operation, dict):\n continue\n if 'parameters' in operation:\n operation['parameters'] = self.resolve_parameters(operation\n ['parameters'])\n if self.openapi_version.major >= 3:\n if 'requestBody' in operation:\n self.resolve_schema_in_request_body(operation[\n 'requestBody'])\n for response in operation.get('responses', {}).values():\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n\n def warn_if_schema_already_in_spec(self, schema_key):\n \"\"\"Method to warn the user if the schema has already been added to the\n spec.\n \"\"\"\n if schema_key in self.openapi.refs:\n warnings.warn(\n '{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'\n .format(schema_key[0]), UserWarning)\n",
"step-3": "<mask token>\n\n\nclass SchematicsPlugin(BasePlugin):\n \"\"\"APISpec plugin handling schematics models\n\n :param callable schema_name_resolver: Callable to generate the schema definition name.\n Receives the `Schema` class and returns the name to be used in refs within\n the generated spec. When working with circular referencing this function\n must must not return `None` for schemas in a circular reference chain.\n\n Example: ::\n\n def schema_name_resolver(schema):\n return schema.__name__\n \"\"\"\n\n def __init__(self, schema_name_resolver=None):\n super().__init__()\n self.schema_name_resolver = schema_name_resolver or resolver\n self.spec = None\n self.openapi_version = None\n self.openapi = None\n\n def init_spec(self, spec):\n super().init_spec(spec)\n self.spec = spec\n self.openapi_version = spec.openapi_version\n self.openapi = OpenAPIConverter(openapi_version=spec.\n openapi_version, schema_name_resolver=self.schema_name_resolver,\n spec=spec)\n\n def resolve_parameters(self, parameters):\n resolved = []\n for parameter in parameters:\n if isinstance(parameter, dict) and not isinstance(parameter.get\n ('schema', {}), dict):\n schema_instance = resolve_schema_instance(parameter['schema'])\n if 'in' in parameter:\n del parameter['schema']\n resolved += self.openapi.schema2parameters(schema_instance,\n default_in=parameter.pop('in'), **parameter)\n continue\n self.resolve_schema(parameter)\n resolved.append(parameter)\n return resolved\n\n def resolve_schema_in_request_body(self, request_body):\n \"\"\"Function to resolve a schema in a requestBody object - modifies then\n response dict to convert Marshmallow Schema object or class into dict\n \"\"\"\n content = request_body['content']\n for content_type in content:\n schema = content[content_type]['schema']\n content[content_type]['schema'] = self.openapi.resolve_schema_dict(\n schema)\n\n def resolve_schema(self, data):\n \"\"\"Function to resolve a schema in a parameter or response - 
modifies the\n corresponding dict to convert Marshmallow Schema object or class into dict\n\n :param APISpec spec: `APISpec` containing refs.\n :param dict|str data: either a parameter or response dictionary that may\n contain a schema, or a reference provided as string\n \"\"\"\n if not isinstance(data, dict):\n return\n if 'schema' in data:\n data['schema'] = self.openapi.resolve_schema_dict(data['schema'])\n if self.openapi_version.major >= 3:\n if 'content' in data:\n for content_type in data['content']:\n schema = data['content'][content_type]['schema']\n data['content'][content_type]['schema'\n ] = self.openapi.resolve_schema_dict(schema)\n\n def map_to_openapi_type(self, *args):\n \"\"\"Decorator to set mapping for custom fields.\n\n ``*args`` can be:\n\n - a pair of the form ``(type, format)``\n - a core marshmallow field type (in which case we reuse that type's mapping)\n\n Examples: ::\n\n @ma_plugin.map_to_openapi_type('string', 'uuid')\n class MyCustomField(Integer):\n # ...\n\n @ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')\n class MyCustomFieldThatsKindaLikeAnInteger(Integer):\n # ...\n \"\"\"\n return self.openapi.map_to_openapi_type(*args)\n\n def schema_helper(self, name, _, schema=None, **kwargs):\n \"\"\"Definition helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` to provide OpenAPI\n metadata.\n\n :param type|Schema schema: A marshmallow Schema class or instance.\n \"\"\"\n if schema is None:\n return None\n schema_instance = resolve_schema_instance(schema)\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n return json_schema\n\n def parameter_helper(self, parameter, **kwargs):\n \"\"\"Parameter component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in parameter definition.\n\n :param dict parameter: 
parameter fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(parameter)\n return parameter\n\n def response_helper(self, response, **kwargs):\n \"\"\"Response component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in response definition.\n\n :param dict parameter: response fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n return response\n\n def operation_helper(self, operations, **kwargs):\n for operation in operations.values():\n if not isinstance(operation, dict):\n continue\n if 'parameters' in operation:\n operation['parameters'] = self.resolve_parameters(operation\n ['parameters'])\n if self.openapi_version.major >= 3:\n if 'requestBody' in operation:\n self.resolve_schema_in_request_body(operation[\n 'requestBody'])\n for response in operation.get('responses', {}).values():\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n\n def warn_if_schema_already_in_spec(self, schema_key):\n \"\"\"Method to warn the user if the schema has already been added to the\n spec.\n \"\"\"\n if schema_key in self.openapi.refs:\n warnings.warn(\n '{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'\n .format(schema_key[0]), UserWarning)\n",
"step-4": "<mask token>\n\n\ndef resolver(schema):\n \"\"\"Default implementation of a schema name resolver function\n \"\"\"\n name = schema.__name__\n if name.endswith('Schema'):\n return name[:-6] or name\n return name\n\n\nclass SchematicsPlugin(BasePlugin):\n \"\"\"APISpec plugin handling schematics models\n\n :param callable schema_name_resolver: Callable to generate the schema definition name.\n Receives the `Schema` class and returns the name to be used in refs within\n the generated spec. When working with circular referencing this function\n must must not return `None` for schemas in a circular reference chain.\n\n Example: ::\n\n def schema_name_resolver(schema):\n return schema.__name__\n \"\"\"\n\n def __init__(self, schema_name_resolver=None):\n super().__init__()\n self.schema_name_resolver = schema_name_resolver or resolver\n self.spec = None\n self.openapi_version = None\n self.openapi = None\n\n def init_spec(self, spec):\n super().init_spec(spec)\n self.spec = spec\n self.openapi_version = spec.openapi_version\n self.openapi = OpenAPIConverter(openapi_version=spec.\n openapi_version, schema_name_resolver=self.schema_name_resolver,\n spec=spec)\n\n def resolve_parameters(self, parameters):\n resolved = []\n for parameter in parameters:\n if isinstance(parameter, dict) and not isinstance(parameter.get\n ('schema', {}), dict):\n schema_instance = resolve_schema_instance(parameter['schema'])\n if 'in' in parameter:\n del parameter['schema']\n resolved += self.openapi.schema2parameters(schema_instance,\n default_in=parameter.pop('in'), **parameter)\n continue\n self.resolve_schema(parameter)\n resolved.append(parameter)\n return resolved\n\n def resolve_schema_in_request_body(self, request_body):\n \"\"\"Function to resolve a schema in a requestBody object - modifies then\n response dict to convert Marshmallow Schema object or class into dict\n \"\"\"\n content = request_body['content']\n for content_type in content:\n schema = 
content[content_type]['schema']\n content[content_type]['schema'] = self.openapi.resolve_schema_dict(\n schema)\n\n def resolve_schema(self, data):\n \"\"\"Function to resolve a schema in a parameter or response - modifies the\n corresponding dict to convert Marshmallow Schema object or class into dict\n\n :param APISpec spec: `APISpec` containing refs.\n :param dict|str data: either a parameter or response dictionary that may\n contain a schema, or a reference provided as string\n \"\"\"\n if not isinstance(data, dict):\n return\n if 'schema' in data:\n data['schema'] = self.openapi.resolve_schema_dict(data['schema'])\n if self.openapi_version.major >= 3:\n if 'content' in data:\n for content_type in data['content']:\n schema = data['content'][content_type]['schema']\n data['content'][content_type]['schema'\n ] = self.openapi.resolve_schema_dict(schema)\n\n def map_to_openapi_type(self, *args):\n \"\"\"Decorator to set mapping for custom fields.\n\n ``*args`` can be:\n\n - a pair of the form ``(type, format)``\n - a core marshmallow field type (in which case we reuse that type's mapping)\n\n Examples: ::\n\n @ma_plugin.map_to_openapi_type('string', 'uuid')\n class MyCustomField(Integer):\n # ...\n\n @ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')\n class MyCustomFieldThatsKindaLikeAnInteger(Integer):\n # ...\n \"\"\"\n return self.openapi.map_to_openapi_type(*args)\n\n def schema_helper(self, name, _, schema=None, **kwargs):\n \"\"\"Definition helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` to provide OpenAPI\n metadata.\n\n :param type|Schema schema: A marshmallow Schema class or instance.\n \"\"\"\n if schema is None:\n return None\n schema_instance = resolve_schema_instance(schema)\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n return json_schema\n\n 
def parameter_helper(self, parameter, **kwargs):\n \"\"\"Parameter component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in parameter definition.\n\n :param dict parameter: parameter fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(parameter)\n return parameter\n\n def response_helper(self, response, **kwargs):\n \"\"\"Response component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in response definition.\n\n :param dict parameter: response fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n return response\n\n def operation_helper(self, operations, **kwargs):\n for operation in operations.values():\n if not isinstance(operation, dict):\n continue\n if 'parameters' in operation:\n operation['parameters'] = self.resolve_parameters(operation\n ['parameters'])\n if self.openapi_version.major >= 3:\n if 'requestBody' in operation:\n self.resolve_schema_in_request_body(operation[\n 'requestBody'])\n for response in operation.get('responses', {}).values():\n self.resolve_schema(response)\n if 'headers' in response:\n for header in response['headers'].values():\n self.resolve_schema(header)\n\n def warn_if_schema_already_in_spec(self, schema_key):\n \"\"\"Method to warn the user if the schema has already been added to the\n spec.\n \"\"\"\n if schema_key in self.openapi.refs:\n warnings.warn(\n '{} has already been added to the spec. Adding it twice may cause references to not resolve properly.'\n .format(schema_key[0]), UserWarning)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"\n openapi.schematics\n ~~~~~~~~~~~~~~~~~~\n Schematics plugin for apispec based on ext.MarshmallowPlugin\n\"\"\"\nimport warnings\n\nfrom apispec import BasePlugin\nfrom .common import resolve_schema_instance, make_schema_key\nfrom .openapi import OpenAPIConverter\n\n\ndef resolver(schema):\n \"\"\"Default implementation of a schema name resolver function\n \"\"\"\n name = schema.__name__\n if name.endswith(\"Schema\"):\n return name[:-6] or name\n return name\n\n\nclass SchematicsPlugin(BasePlugin):\n \"\"\"APISpec plugin handling schematics models\n\n :param callable schema_name_resolver: Callable to generate the schema definition name.\n Receives the `Schema` class and returns the name to be used in refs within\n the generated spec. When working with circular referencing this function\n must must not return `None` for schemas in a circular reference chain.\n\n Example: ::\n\n def schema_name_resolver(schema):\n return schema.__name__\n \"\"\"\n\n def __init__(self, schema_name_resolver=None):\n super().__init__()\n self.schema_name_resolver = schema_name_resolver or resolver\n self.spec = None\n self.openapi_version = None\n self.openapi = None\n\n def init_spec(self, spec):\n super().init_spec(spec)\n self.spec = spec\n self.openapi_version = spec.openapi_version\n self.openapi = OpenAPIConverter(\n openapi_version=spec.openapi_version,\n schema_name_resolver=self.schema_name_resolver,\n spec=spec,\n )\n\n def resolve_parameters(self, parameters):\n resolved = []\n for parameter in parameters:\n if isinstance(parameter, dict) and not isinstance(\n parameter.get(\"schema\", {}), dict\n ):\n schema_instance = resolve_schema_instance(parameter[\"schema\"])\n if \"in\" in parameter:\n del parameter[\"schema\"]\n resolved += self.openapi.schema2parameters(\n schema_instance, default_in=parameter.pop(\"in\"), **parameter\n )\n continue\n self.resolve_schema(parameter)\n resolved.append(parameter)\n return resolved\n\n def 
resolve_schema_in_request_body(self, request_body):\n \"\"\"Function to resolve a schema in a requestBody object - modifies then\n response dict to convert Marshmallow Schema object or class into dict\n \"\"\"\n content = request_body[\"content\"]\n for content_type in content:\n schema = content[content_type][\"schema\"]\n content[content_type][\"schema\"] = self.openapi.resolve_schema_dict(schema)\n\n def resolve_schema(self, data):\n \"\"\"Function to resolve a schema in a parameter or response - modifies the\n corresponding dict to convert Marshmallow Schema object or class into dict\n\n :param APISpec spec: `APISpec` containing refs.\n :param dict|str data: either a parameter or response dictionary that may\n contain a schema, or a reference provided as string\n \"\"\"\n if not isinstance(data, dict):\n return\n\n # OAS 2 component or OAS 3 header\n if \"schema\" in data:\n data[\"schema\"] = self.openapi.resolve_schema_dict(data[\"schema\"])\n # OAS 3 component except header\n if self.openapi_version.major >= 3:\n if \"content\" in data:\n for content_type in data[\"content\"]:\n schema = data[\"content\"][content_type][\"schema\"]\n data[\"content\"][content_type][\n \"schema\"\n ] = self.openapi.resolve_schema_dict(schema)\n\n def map_to_openapi_type(self, *args):\n \"\"\"Decorator to set mapping for custom fields.\n\n ``*args`` can be:\n\n - a pair of the form ``(type, format)``\n - a core marshmallow field type (in which case we reuse that type's mapping)\n\n Examples: ::\n\n @ma_plugin.map_to_openapi_type('string', 'uuid')\n class MyCustomField(Integer):\n # ...\n\n @ma_plugin.map_to_openapi_type(Integer) # will map to ('integer', 'int32')\n class MyCustomFieldThatsKindaLikeAnInteger(Integer):\n # ...\n \"\"\"\n return self.openapi.map_to_openapi_type(*args)\n\n def schema_helper(self, name, _, schema=None, **kwargs):\n \"\"\"Definition helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` to provide OpenAPI\n metadata.\n\n :param 
type|Schema schema: A marshmallow Schema class or instance.\n \"\"\"\n if schema is None:\n return None\n\n schema_instance = resolve_schema_instance(schema)\n\n schema_key = make_schema_key(schema_instance)\n self.warn_if_schema_already_in_spec(schema_key)\n self.openapi.refs[schema_key] = name\n\n json_schema = self.openapi.schema2jsonschema(schema_instance)\n\n return json_schema\n\n def parameter_helper(self, parameter, **kwargs):\n \"\"\"Parameter component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in parameter definition.\n\n :param dict parameter: parameter fields. May contain a marshmallow\n Schema class or instance.\n \"\"\"\n # In OpenAPIv3, this only works when using the complex form using \"content\"\n self.resolve_schema(parameter)\n return parameter\n\n def response_helper(self, response, **kwargs):\n \"\"\"Response component helper that allows using a marshmallow\n :class:`Schema <marshmallow.Schema>` in response definition.\n\n :param dict parameter: response fields. 
May contain a marshmallow\n Schema class or instance.\n \"\"\"\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n return response\n\n def operation_helper(self, operations, **kwargs):\n for operation in operations.values():\n if not isinstance(operation, dict):\n continue\n if \"parameters\" in operation:\n operation[\"parameters\"] = self.resolve_parameters(\n operation[\"parameters\"]\n )\n if self.openapi_version.major >= 3:\n if \"requestBody\" in operation:\n self.resolve_schema_in_request_body(operation[\"requestBody\"])\n for response in operation.get(\"responses\", {}).values():\n self.resolve_schema(response)\n if \"headers\" in response:\n for header in response[\"headers\"].values():\n self.resolve_schema(header)\n\n def warn_if_schema_already_in_spec(self, schema_key):\n \"\"\"Method to warn the user if the schema has already been added to the\n spec.\n \"\"\"\n if schema_key in self.openapi.refs:\n warnings.warn(\n \"{} has already been added to the spec. Adding it twice may \"\n \"cause references to not resolve properly.\".format(schema_key[0]),\n UserWarning,\n )\n",
"step-ids": [
9,
12,
13,
14,
16
]
}
|
[
9,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lambda_handler(event, context):
print(requests)
apiKey = os.environ['newrelic_api_key']
headers = {'content-type': 'application/json', 'Accept-Charset':
'UTF-8', 'X-api-key': apiKey}
r = requests.get('https://api.newrelic.com/v2/applications.json',
headers=headers)
return r.json()
<|reserved_special_token_1|>
import requests, os
def lambda_handler(event, context):
print(requests)
apiKey = os.environ['newrelic_api_key']
headers = {'content-type': 'application/json', 'Accept-Charset':
'UTF-8', 'X-api-key': apiKey}
r = requests.get('https://api.newrelic.com/v2/applications.json',
headers=headers)
return r.json()
|
flexible
|
{
"blob_id": "e89600f109335ffdb00c13f617d61496c547ba61",
"index": 5612,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lambda_handler(event, context):\n print(requests)\n apiKey = os.environ['newrelic_api_key']\n headers = {'content-type': 'application/json', 'Accept-Charset':\n 'UTF-8', 'X-api-key': apiKey}\n r = requests.get('https://api.newrelic.com/v2/applications.json',\n headers=headers)\n return r.json()\n",
"step-3": "import requests, os\n\n\ndef lambda_handler(event, context):\n print(requests)\n apiKey = os.environ['newrelic_api_key']\n headers = {'content-type': 'application/json', 'Accept-Charset':\n 'UTF-8', 'X-api-key': apiKey}\n r = requests.get('https://api.newrelic.com/v2/applications.json',\n headers=headers)\n return r.json()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Person:
alive = True
<|reserved_special_token_0|>
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print('Hello ', self.name)
def greetByTime(self, time='Morning'):
print('Hello', self.name, ' . ', time)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person:
alive = True
"""
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
"""
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print('Hello ', self.name)
def greetByTime(self, time='Morning'):
print('Hello', self.name, ' . ', time)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person:
alive = True
"""
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
"""
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print('Hello ', self.name)
def greetByTime(self, time='Morning'):
print('Hello', self.name, ' . ', time)
print('Accessing Static Variable', Person.alive)
<|reserved_special_token_0|>
print("""
Accessing Functions
""")
p.greet()
p.greetByTime()
p.greetByTime('Goodnight')
print("""
Accessing Variables
""")
print(p.name, p.age, p.gender)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Person:
alive = True
"""
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
"""
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print('Hello ', self.name)
def greetByTime(self, time='Morning'):
print('Hello', self.name, ' . ', time)
print('Accessing Static Variable', Person.alive)
p = Person('John', 30, 'Male')
print("""
Accessing Functions
""")
p.greet()
p.greetByTime()
p.greetByTime('Goodnight')
print("""
Accessing Variables
""")
print(p.name, p.age, p.gender)
<|reserved_special_token_1|>
'''
Classes
'''
class Person:
alive = True
'''
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
'''
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print("Hello ", self.name)
def greetByTime(self, time="Morning"):
print("Hello", self.name, " . ", time)
print("Accessing Static Variable", Person.alive)
p = Person("John", 30, "Male")
print("\n\nAccessing Functions \n\n")
p.greet()
p.greetByTime()
p.greetByTime("Goodnight")
print("\n\nAccessing Variables \n\n")
print(p.name, p.age, p.gender)
|
flexible
|
{
"blob_id": "11feb13f38f2484c867a8b3fa525ffecf419dfe5",
"index": 9957,
"step-1": "<mask token>\n\n\nclass Person:\n alive = True\n <mask token>\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\n<mask token>\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-4": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\np = Person('John', 30, 'Male')\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-5": "'''\n\nClasses\n\n'''\n\n\nclass Person:\n alive = True\n\n '''\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n '''\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print(\"Hello \", self.name)\n\n def greetByTime(self, time=\"Morning\"):\n print(\"Hello\", self.name, \" . \", time)\n\n\nprint(\"Accessing Static Variable\", Person.alive)\np = Person(\"John\", 30, \"Male\")\n\nprint(\"\\n\\nAccessing Functions \\n\\n\")\np.greet()\np.greetByTime()\np.greetByTime(\"Goodnight\")\n\nprint(\"\\n\\nAccessing Variables \\n\\n\")\nprint(p.name, p.age, p.gender)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class Opcode1(Opcode):
<|reserved_special_token_0|>
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 1, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first + self.__second
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def op(self):
return '+'
<|reserved_special_token_0|>
class Opcode2(Opcode):
"""
>>> o = Opcode2([2, 2, 3, 4, 99], 0)
>>> o.run()
True
>>> o.memory
[2, 2, 3, 4, 12]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 2, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first * self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '*'
def __str__(self):
return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)
class Opcode99(Opcode):
"""
>>> o = Opcode99([99,12,3,4,5], 0)
>>> o.run()
False
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 99, 1)
def run(self):
return False
def params(self):
return {}
def reads(self):
return []
def writes(self):
return None
def op(self):
return 'HALT'
def __str__(self):
return 'HALT'
<|reserved_special_token_0|>
class Interpreter(object):
def __init__(self, input_code, ops=default_ops()):
self.__memory = input_code
self.__ops = ops
self.__ptr = 0
self.__running = True
self.length = len(self.__memory)
def stepi(self):
o = None
if self.__running:
o = self.next_op()
self.__running = o.run()
chk, val = o.set_ptr()
if chk:
self.__ptr = val
else:
self.__ptr += o.ptr_inc()
return o
def run(self):
while self.__running:
self.stepi()
def inspect(self, loc):
return self.__memory[loc]
def next_op(self):
return self.op_at(self.__ptr)
def op_at(self, ptr):
return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)
def __str__(self):
strs = []
for i, v in enumerate(self.__memory):
if i == self.__ptr:
strs.append('{:*>4}'.format(v))
else:
strs.append('{:>4}'.format(v))
return ','.join(strs) + '\n' + 'Next:\n\t' + str(self.next_op())
def poke(self, loc, val):
self.__memory[loc] = val
def rebind(self, code, call):
self.__ops[code] = call
def as_opcodes(self):
ops = [self.op_at(0)]
ptr = ops[-1].ptr_inc()
while ops[-1].op() != 'HALT':
ops.append(self.op_at(ptr))
ptr += ops[-1].ptr_inc()
return ops
class ValueNode(object):
def __init__(self, val, tag=''):
self.__val = val
self.__tag = tag
def __str__(self):
return self.__tag + str(self.__val)
class OpNode(object):
def __init__(self, op, depends):
self.__op = op
self.__depends = depends
def __str__(self):
return '(' + self.__op.op().join([str(i) for i in self.__depends]
) + ')'
class OpcodeTreeBuilder(object):
def __init__(self, interp):
self.__interpreter = interp
self.__codes = interp.as_opcodes()
def construct_mappings(self):
for i in self.__codes:
params = i.params()
if 'result' in params.keys():
if params['result'] not in self.__writes_to.keys():
self.__writes_to[params['result']] = []
self.__writes_to[params['result']].append(i)
if 'noun' in params.keys():
if params['noun'] not in self.__reads_from.keys():
self.__reads_from[params['noun']] = []
self.__reads_from[params['noun']].append(i)
if 'verb' in params.keys():
if params['verb'] not in self.__reads_from.keys():
self.__reads_from[params['verb']] = []
self.__reads_from[params['verb']].append(i)
def construct_graph(self):
op = self.__interpreter.op_at(0)
reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for
i in op.reads()]
writes = op.writes()
base = OpNode(op, reads)
ptr = op.ptr_inc()
last_write = {}
if writes:
last_write[writes] = base
while op.op() != 'HALT':
op = self.__interpreter.op_at(ptr)
if op.op() == 'HALT':
break
depends = []
for i in op.reads():
if i in last_write.keys():
depends.append(last_write[i])
else:
depends.append(ValueNode(self.__interpreter.inspect(i)))
base = OpNode(op, depends)
if op.writes():
last_write[op.writes()] = base
ptr += op.ptr_inc()
return base
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Opcode1(Opcode):
<|reserved_special_token_0|>
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 1, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first + self.__second
return True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def writes(self):
return self.__res
def op(self):
return '+'
<|reserved_special_token_0|>
class Opcode2(Opcode):
"""
>>> o = Opcode2([2, 2, 3, 4, 99], 0)
>>> o.run()
True
>>> o.memory
[2, 2, 3, 4, 12]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 2, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first * self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '*'
def __str__(self):
return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)
class Opcode99(Opcode):
"""
>>> o = Opcode99([99,12,3,4,5], 0)
>>> o.run()
False
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 99, 1)
def run(self):
return False
def params(self):
return {}
def reads(self):
return []
def writes(self):
return None
def op(self):
return 'HALT'
def __str__(self):
return 'HALT'
<|reserved_special_token_0|>
class Interpreter(object):
def __init__(self, input_code, ops=default_ops()):
self.__memory = input_code
self.__ops = ops
self.__ptr = 0
self.__running = True
self.length = len(self.__memory)
def stepi(self):
o = None
if self.__running:
o = self.next_op()
self.__running = o.run()
chk, val = o.set_ptr()
if chk:
self.__ptr = val
else:
self.__ptr += o.ptr_inc()
return o
def run(self):
while self.__running:
self.stepi()
def inspect(self, loc):
return self.__memory[loc]
def next_op(self):
return self.op_at(self.__ptr)
def op_at(self, ptr):
return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)
def __str__(self):
strs = []
for i, v in enumerate(self.__memory):
if i == self.__ptr:
strs.append('{:*>4}'.format(v))
else:
strs.append('{:>4}'.format(v))
return ','.join(strs) + '\n' + 'Next:\n\t' + str(self.next_op())
def poke(self, loc, val):
self.__memory[loc] = val
def rebind(self, code, call):
self.__ops[code] = call
def as_opcodes(self):
ops = [self.op_at(0)]
ptr = ops[-1].ptr_inc()
while ops[-1].op() != 'HALT':
ops.append(self.op_at(ptr))
ptr += ops[-1].ptr_inc()
return ops
class ValueNode(object):
def __init__(self, val, tag=''):
self.__val = val
self.__tag = tag
def __str__(self):
return self.__tag + str(self.__val)
class OpNode(object):
def __init__(self, op, depends):
self.__op = op
self.__depends = depends
def __str__(self):
return '(' + self.__op.op().join([str(i) for i in self.__depends]
) + ')'
class OpcodeTreeBuilder(object):
def __init__(self, interp):
self.__interpreter = interp
self.__codes = interp.as_opcodes()
def construct_mappings(self):
for i in self.__codes:
params = i.params()
if 'result' in params.keys():
if params['result'] not in self.__writes_to.keys():
self.__writes_to[params['result']] = []
self.__writes_to[params['result']].append(i)
if 'noun' in params.keys():
if params['noun'] not in self.__reads_from.keys():
self.__reads_from[params['noun']] = []
self.__reads_from[params['noun']].append(i)
if 'verb' in params.keys():
if params['verb'] not in self.__reads_from.keys():
self.__reads_from[params['verb']] = []
self.__reads_from[params['verb']].append(i)
def construct_graph(self):
op = self.__interpreter.op_at(0)
reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for
i in op.reads()]
writes = op.writes()
base = OpNode(op, reads)
ptr = op.ptr_inc()
last_write = {}
if writes:
last_write[writes] = base
while op.op() != 'HALT':
op = self.__interpreter.op_at(ptr)
if op.op() == 'HALT':
break
depends = []
for i in op.reads():
if i in last_write.keys():
depends.append(last_write[i])
else:
depends.append(ValueNode(self.__interpreter.inspect(i)))
base = OpNode(op, depends)
if op.writes():
last_write[op.writes()] = base
ptr += op.ptr_inc()
return base
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Opcode(object):
<|reserved_special_token_0|>
def ptr_inc(self):
return self.__ptr_inc
def get_val(self, arg_idx):
"""
>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
>>> o.get_val(1)
4
>>> o.get_val(2)
4
>>> o.get_val(3)
2
"""
idx = arg_idx - 1
if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:
return self.memory[self.memory[self.ptr + arg_idx]]
elif self.__par_modes[idx] == 1:
return self.memory[self.ptr + arg_idx]
def set_ptr(self):
return False, 0
def reads(self):
raise Exception('Call to base class reads()')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def params(self):
raise Exception('Call to base class params()')
def run(self):
raise Exception('Call to base class run()')
class Opcode1(Opcode):
"""
>>> o = Opcode1([101, 2, 1, 3], 0)
>>> o.run()
True
>>> o.memory
[101, 2, 1, 4]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 1, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first + self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '+'
def __str__(self):
return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)
class Opcode2(Opcode):
"""
>>> o = Opcode2([2, 2, 3, 4, 99], 0)
>>> o.run()
True
>>> o.memory
[2, 2, 3, 4, 12]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 2, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first * self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '*'
def __str__(self):
return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)
class Opcode99(Opcode):
"""
>>> o = Opcode99([99,12,3,4,5], 0)
>>> o.run()
False
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 99, 1)
def run(self):
return False
def params(self):
return {}
def reads(self):
return []
def writes(self):
return None
def op(self):
return 'HALT'
def __str__(self):
return 'HALT'
<|reserved_special_token_0|>
class Interpreter(object):
def __init__(self, input_code, ops=default_ops()):
self.__memory = input_code
self.__ops = ops
self.__ptr = 0
self.__running = True
self.length = len(self.__memory)
def stepi(self):
o = None
if self.__running:
o = self.next_op()
self.__running = o.run()
chk, val = o.set_ptr()
if chk:
self.__ptr = val
else:
self.__ptr += o.ptr_inc()
return o
def run(self):
while self.__running:
self.stepi()
def inspect(self, loc):
return self.__memory[loc]
def next_op(self):
return self.op_at(self.__ptr)
def op_at(self, ptr):
return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)
def __str__(self):
strs = []
for i, v in enumerate(self.__memory):
if i == self.__ptr:
strs.append('{:*>4}'.format(v))
else:
strs.append('{:>4}'.format(v))
return ','.join(strs) + '\n' + 'Next:\n\t' + str(self.next_op())
def poke(self, loc, val):
self.__memory[loc] = val
def rebind(self, code, call):
self.__ops[code] = call
def as_opcodes(self):
ops = [self.op_at(0)]
ptr = ops[-1].ptr_inc()
while ops[-1].op() != 'HALT':
ops.append(self.op_at(ptr))
ptr += ops[-1].ptr_inc()
return ops
class ValueNode(object):
def __init__(self, val, tag=''):
self.__val = val
self.__tag = tag
def __str__(self):
return self.__tag + str(self.__val)
class OpNode(object):
def __init__(self, op, depends):
self.__op = op
self.__depends = depends
def __str__(self):
return '(' + self.__op.op().join([str(i) for i in self.__depends]
) + ')'
class OpcodeTreeBuilder(object):
def __init__(self, interp):
self.__interpreter = interp
self.__codes = interp.as_opcodes()
def construct_mappings(self):
for i in self.__codes:
params = i.params()
if 'result' in params.keys():
if params['result'] not in self.__writes_to.keys():
self.__writes_to[params['result']] = []
self.__writes_to[params['result']].append(i)
if 'noun' in params.keys():
if params['noun'] not in self.__reads_from.keys():
self.__reads_from[params['noun']] = []
self.__reads_from[params['noun']].append(i)
if 'verb' in params.keys():
if params['verb'] not in self.__reads_from.keys():
self.__reads_from[params['verb']] = []
self.__reads_from[params['verb']].append(i)
def construct_graph(self):
op = self.__interpreter.op_at(0)
reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for
i in op.reads()]
writes = op.writes()
base = OpNode(op, reads)
ptr = op.ptr_inc()
last_write = {}
if writes:
last_write[writes] = base
while op.op() != 'HALT':
op = self.__interpreter.op_at(ptr)
if op.op() == 'HALT':
break
depends = []
for i in op.reads():
if i in last_write.keys():
depends.append(last_write[i])
else:
depends.append(ValueNode(self.__interpreter.inspect(i)))
base = OpNode(op, depends)
if op.writes():
last_write[op.writes()] = base
ptr += op.ptr_inc()
return base
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Opcode(object):
def __init__(self, mem, ptr, code, inc):
"""
>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
>>> o._Opcode__par_modes
[0, 1]
"""
if mem[ptr] % 100 != code:
raise Exception('Creating Opcode%d for opcode %d' % (code, mem[
ptr]))
self.memory = mem
self.ptr = ptr
self.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr] /
100))]))
self.__ptr_inc = inc
def ptr_inc(self):
return self.__ptr_inc
def get_val(self, arg_idx):
"""
>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
>>> o.get_val(1)
4
>>> o.get_val(2)
4
>>> o.get_val(3)
2
"""
idx = arg_idx - 1
if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:
return self.memory[self.memory[self.ptr + arg_idx]]
elif self.__par_modes[idx] == 1:
return self.memory[self.ptr + arg_idx]
def set_ptr(self):
return False, 0
def reads(self):
raise Exception('Call to base class reads()')
def writes(self):
raise Exception('Call to base class writes()')
<|reserved_special_token_0|>
def params(self):
raise Exception('Call to base class params()')
def run(self):
raise Exception('Call to base class run()')
class Opcode1(Opcode):
"""
>>> o = Opcode1([101, 2, 1, 3], 0)
>>> o.run()
True
>>> o.memory
[101, 2, 1, 4]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 1, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first + self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '+'
def __str__(self):
return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)
class Opcode2(Opcode):
"""
>>> o = Opcode2([2, 2, 3, 4, 99], 0)
>>> o.run()
True
>>> o.memory
[2, 2, 3, 4, 12]
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 2, 4)
self.__first = self.get_val(1)
self.__second = self.get_val(2)
self.__res = mem[ptr + 3]
def run(self):
self.memory[self.__res] = self.__first * self.__second
return True
def params(self):
return {'noun': self.__first, 'verb': self.__second, 'result': self
.__res}
def reads(self):
return [self.__first, self.__second]
def writes(self):
return self.__res
def op(self):
return '*'
def __str__(self):
return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)
class Opcode99(Opcode):
"""
>>> o = Opcode99([99,12,3,4,5], 0)
>>> o.run()
False
"""
def __init__(self, mem, ptr):
super().__init__(mem, ptr, 99, 1)
def run(self):
return False
def params(self):
return {}
def reads(self):
return []
def writes(self):
return None
def op(self):
return 'HALT'
def __str__(self):
return 'HALT'
<|reserved_special_token_0|>
class Interpreter(object):
def __init__(self, input_code, ops=default_ops()):
self.__memory = input_code
self.__ops = ops
self.__ptr = 0
self.__running = True
self.length = len(self.__memory)
def stepi(self):
o = None
if self.__running:
o = self.next_op()
self.__running = o.run()
chk, val = o.set_ptr()
if chk:
self.__ptr = val
else:
self.__ptr += o.ptr_inc()
return o
def run(self):
while self.__running:
self.stepi()
def inspect(self, loc):
return self.__memory[loc]
def next_op(self):
return self.op_at(self.__ptr)
def op_at(self, ptr):
return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)
def __str__(self):
strs = []
for i, v in enumerate(self.__memory):
if i == self.__ptr:
strs.append('{:*>4}'.format(v))
else:
strs.append('{:>4}'.format(v))
return ','.join(strs) + '\n' + 'Next:\n\t' + str(self.next_op())
def poke(self, loc, val):
self.__memory[loc] = val
def rebind(self, code, call):
self.__ops[code] = call
def as_opcodes(self):
ops = [self.op_at(0)]
ptr = ops[-1].ptr_inc()
while ops[-1].op() != 'HALT':
ops.append(self.op_at(ptr))
ptr += ops[-1].ptr_inc()
return ops
class ValueNode(object):
def __init__(self, val, tag=''):
self.__val = val
self.__tag = tag
def __str__(self):
return self.__tag + str(self.__val)
class OpNode(object):
def __init__(self, op, depends):
self.__op = op
self.__depends = depends
def __str__(self):
return '(' + self.__op.op().join([str(i) for i in self.__depends]
) + ')'
class OpcodeTreeBuilder(object):
def __init__(self, interp):
self.__interpreter = interp
self.__codes = interp.as_opcodes()
def construct_mappings(self):
for i in self.__codes:
params = i.params()
if 'result' in params.keys():
if params['result'] not in self.__writes_to.keys():
self.__writes_to[params['result']] = []
self.__writes_to[params['result']].append(i)
if 'noun' in params.keys():
if params['noun'] not in self.__reads_from.keys():
self.__reads_from[params['noun']] = []
self.__reads_from[params['noun']].append(i)
if 'verb' in params.keys():
if params['verb'] not in self.__reads_from.keys():
self.__reads_from[params['verb']] = []
self.__reads_from[params['verb']].append(i)
def construct_graph(self):
op = self.__interpreter.op_at(0)
reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for
i in op.reads()]
writes = op.writes()
base = OpNode(op, reads)
ptr = op.ptr_inc()
last_write = {}
if writes:
last_write[writes] = base
while op.op() != 'HALT':
op = self.__interpreter.op_at(ptr)
if op.op() == 'HALT':
break
depends = []
for i in op.reads():
if i in last_write.keys():
depends.append(last_write[i])
else:
depends.append(ValueNode(self.__interpreter.inspect(i)))
base = OpNode(op, depends)
if op.writes():
last_write[op.writes()] = base
ptr += op.ptr_inc()
return base
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python3
def file_to_code(fname):
    """Read a comma-separated Intcode program from *fname* into a flat list of ints.

    Multi-line files are supported: each line's values are appended to one
    memory list in order.
    """
    mem = []
    # 'with' guarantees the handle is closed even if int() raises on bad input
    # (the original left the file open on the exception path).
    with open(fname, "r") as fh:
        for line in fh:
            mem.extend(int(i) for i in line.split(","))
    return mem
class Opcode(object):
    """Base class for one decoded Intcode instruction.

    The value at ``mem[ptr]`` encodes the opcode in its two low decimal
    digits; the remaining digits, read right-to-left, give one parameter
    mode per argument (0 = position, 1 = immediate).
    """

    def __init__(self, mem, ptr, code, inc):
        """
        mem  -- full program memory (shared list, mutated by run()).
        ptr  -- index of this instruction in mem.
        code -- expected opcode number; a mismatch raises.
        inc  -- number of memory cells this instruction occupies.

        >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
        >>> o._Opcode__par_modes
        [0, 1]
        """
        if mem[ptr] % 100 != code:
            raise Exception("Creating Opcode%d for opcode %d" % (code, mem[ptr]))
        self.memory = mem
        self.ptr = ptr
        # Digits above the two-digit opcode, least-significant first, are the
        # per-argument modes.  // keeps this in integer math (the original
        # round-tripped through float division).
        self.__par_modes = list(reversed([int(i) for i in str(mem[ptr] // 100)]))
        self.__ptr_inc = inc

    def ptr_inc(self):
        """Return how far the instruction pointer advances past this op."""
        return self.__ptr_inc

    def get_val(self, arg_idx):
        """Fetch argument *arg_idx* (1-based) honouring its parameter mode.

        >>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)
        >>> o.get_val(1)
        4
        >>> o.get_val(2)
        4
        >>> o.get_val(3)
        2
        """
        idx = arg_idx - 1
        # Missing mode digits default to 0 (position mode).
        if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:
            return self.memory[self.memory[self.ptr + arg_idx]]
        elif self.__par_modes[idx] == 1:
            return self.memory[self.ptr + arg_idx]
        # Previously fell through and returned None; fail loudly instead.
        raise Exception("Unknown parameter mode %d" % self.__par_modes[idx])

    def set_ptr(self):
        """Return (should_jump, target); plain opcodes never jump."""
        return False, 0

    def reads(self):
        raise Exception("Call to base class reads()")

    def writes(self):
        raise Exception("Call to base class writes()")

    def op(self):
        raise Exception("Call to base class op()")

    def params(self):
        raise Exception("Call to base class params()")

    def run(self):
        raise Exception("Call to base class run()")
class Opcode1(Opcode):
    """Addition: store arg1 + arg2 at the address given by arg3.

    >>> o = Opcode1([101, 2, 1, 3], 0)
    >>> o.run()
    True
    >>> o.memory
    [101, 2, 1, 4]
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 1, 4)
        self.__lhs = self.get_val(1)
        self.__rhs = self.get_val(2)
        self.__dest = mem[ptr + 3]

    def run(self):
        """Perform the addition; execution always continues."""
        self.memory[self.__dest] = self.__lhs + self.__rhs
        return True

    def params(self):
        """Operand/result summary used by the tree builder."""
        return {'noun': self.__lhs, 'verb': self.__rhs, 'result': self.__dest}

    def reads(self):
        return [self.__lhs, self.__rhs]

    def writes(self):
        return self.__dest

    def op(self):
        return "+"

    def __str__(self):
        return "loc[%d] = %d + %d" % (self.__dest, self.__lhs, self.__rhs)
class Opcode2(Opcode):
    """Multiplication: store arg1 * arg2 at the address given by arg3.

    >>> o = Opcode2([2, 2, 3, 4, 99], 0)
    >>> o.run()
    True
    >>> o.memory
    [2, 2, 3, 4, 12]
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 2, 4)
        self.__lhs = self.get_val(1)
        self.__rhs = self.get_val(2)
        self.__dest = mem[ptr + 3]

    def run(self):
        """Perform the multiplication; execution always continues."""
        self.memory[self.__dest] = self.__lhs * self.__rhs
        return True

    def params(self):
        """Operand/result summary used by the tree builder."""
        return {'noun': self.__lhs, 'verb': self.__rhs, 'result': self.__dest}

    def reads(self):
        return [self.__lhs, self.__rhs]

    def writes(self):
        return self.__dest

    def op(self):
        return "*"

    def __str__(self):
        return "loc[%d] = %d * %d" % (self.__dest, self.__lhs, self.__rhs)
class Opcode99(Opcode):
    """Halt instruction: stops the interpreter, touches nothing.

    >>> o = Opcode99([99,12,3,4,5], 0)
    >>> o.run()
    False
    """

    def __init__(self, mem, ptr):
        super().__init__(mem, ptr, 99, 1)

    def run(self):
        """Returning False tells the interpreter loop to stop."""
        return False

    def params(self):
        """HALT has no operands."""
        return {}

    def reads(self):
        return []

    def writes(self):
        return None

    def op(self):
        return "HALT"

    def __str__(self):
        return "HALT"
def default_ops():
    """Return a fresh opcode-number -> handler-class dispatch table."""
    return {1: Opcode1, 2: Opcode2, 99: Opcode99}
class Interpreter(object):
    """Executes an Intcode program held in a flat memory list (mutated in place)."""

    def __init__(self, input_code, ops=None):
        """
        input_code -- list of ints; the interpreter writes results back into it.
        ops        -- optional opcode-number -> handler-class table.

        The original signature was ``ops=default_ops()``: the default dict was
        built once at def time and shared by every instance, so rebind() on
        one interpreter silently changed the dispatch table of all later
        interpreters.  A None sentinel builds a fresh table per instance.
        """
        self.__memory = input_code
        self.__ops = default_ops() if ops is None else ops
        self.__ptr = 0
        self.__running = True
        self.length = len(self.__memory)

    def stepi(self):
        """Execute one instruction; return the Opcode executed (None if halted)."""
        o = None
        if self.__running:
            o = self.next_op()
            self.__running = o.run()
            chk, val = o.set_ptr()
            if chk:
                self.__ptr = val
            else:
                self.__ptr += o.ptr_inc()
        return o

    def run(self):
        """Step until an opcode's run() returns False (HALT)."""
        while self.__running:
            self.stepi()

    def inspect(self, loc):
        """Read memory at *loc* without executing anything."""
        return self.__memory[loc]

    def next_op(self):
        """Decode the instruction at the current pointer."""
        return self.op_at(self.__ptr)

    def op_at(self, ptr):
        """Decode the instruction at *ptr*; % 100 strips the parameter modes."""
        return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)

    def __str__(self):
        strs = []
        for i, v in enumerate(self.__memory):
            if i == self.__ptr:
                strs.append("{:*>4}".format(v))  # '*' pad marks the current cell
            else:
                strs.append("{:>4}".format(v))
        return ",".join(strs) + "\n" + "Next:\n\t" + str(self.next_op())

    def poke(self, loc, val):
        """Overwrite memory at *loc* with *val* (e.g. noun/verb patching)."""
        self.__memory[loc] = val

    def rebind(self, code, call):
        """Swap in *call* as the handler class for opcode *code*."""
        self.__ops[code] = call

    def as_opcodes(self):
        """Statically decode the program front-to-back until HALT.

        Assumes straight-line code: advances by ptr_inc() only, never follows
        jumps, so it is only valid for opcode-1/2/99 programs.
        """
        ops = [self.op_at(0)]
        ptr = ops[-1].ptr_inc()
        while ops[-1].op() != "HALT":
            ops.append(self.op_at(ptr))
            ptr += ops[-1].ptr_inc()
        return ops
class ValueNode(object):
    """Leaf of the expression tree: a raw value with an optional tag prefix."""

    def __init__(self, val, tag=''):
        self.__value = val
        self.__label = tag

    def __str__(self):
        return '%s%s' % (self.__label, self.__value)
class OpNode(object):
    """Interior node of the expression tree: an operation over its children."""

    def __init__(self, op, depends):
        self.__op = op
        self.__depends = depends

    def __str__(self):
        parts = [str(child) for child in self.__depends]
        return '(%s)' % self.__op.op().join(parts)
class OpcodeTreeBuilder(object):
    """Builds data-flow views of a straight-line Intcode program."""

    def __init__(self, interp):
        self.__interpreter = interp
        self.__codes = interp.as_opcodes()
        # Location -> list of opcodes touching it.  These must exist before
        # construct_mappings() appends to them; the original never created
        # them, so construct_mappings() raised AttributeError on first use.
        self.__writes_to = {}
        self.__reads_from = {}

    def construct_mappings(self):
        """Index which opcodes write to / read from each operand."""
        for code in self.__codes:
            params = code.params()
            if 'result' in params:
                self.__writes_to.setdefault(params['result'], []).append(code)
            if 'noun' in params:
                self.__reads_from.setdefault(params['noun'], []).append(code)
            if 'verb' in params:
                self.__reads_from.setdefault(params['verb'], []).append(code)

    def construct_graph(self):
        """Build an expression tree for the program's data flow; return the root.

        NOTE(review): reads() returns operand *values* while writes() returns
        a destination *address*, so last_write is keyed inconsistently; this
        mirrors the original behaviour -- confirm intent before relying on it.
        """
        op = self.__interpreter.op_at(0)
        reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i)
                 for i in op.reads()]
        writes = op.writes()
        base = OpNode(op, reads)
        ptr = op.ptr_inc()
        last_write = {}
        if writes:
            last_write[writes] = base
        while op.op() != 'HALT':
            op = self.__interpreter.op_at(ptr)
            if op.op() == 'HALT':
                break
            depends = []
            for i in op.reads():
                if i in last_write:
                    # Reuse the subtree that produced this operand.
                    depends.append(last_write[i])
                else:
                    depends.append(ValueNode(self.__interpreter.inspect(i)))
            base = OpNode(op, depends)
            if op.writes():
                last_write[op.writes()] = base
            ptr += op.ptr_inc()
        return base
# Self-test: running the module directly executes its embedded doctests.
if __name__=='__main__':
    import doctest
    doctest.testmod()
#################################################
# Example usage (commented out): run a puzzle input end-to-end.
# i = Interpreter(file_to_code("day2_input.txt"))
# i.run()
# i.inspect(0)
|
flexible
|
{
"blob_id": "653e65281984ebb06467aeadb6f0e2b11f1bcb4d",
"index": 496,
"step-1": "<mask token>\n\n\nclass Opcode1(Opcode):\n <mask token>\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def op(self):\n return '+'\n <mask token>\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while 
self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), 
tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Opcode1(Opcode):\n <mask token>\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n <mask token>\n <mask token>\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n <mask token>\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def 
run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = 
[ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Opcode(object):\n <mask token>\n\n def ptr_inc(self):\n return self.__ptr_inc\n\n def get_val(self, arg_idx):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n idx = arg_idx - 1\n if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n return self.memory[self.memory[self.ptr + arg_idx]]\n elif self.__par_modes[idx] == 1:\n return self.memory[self.ptr + arg_idx]\n\n def set_ptr(self):\n return False, 0\n\n def reads(self):\n raise Exception('Call to base class reads()')\n <mask token>\n <mask token>\n\n def params(self):\n raise Exception('Call to base class params()')\n\n def run(self):\n raise Exception('Call to base class run()')\n\n\nclass Opcode1(Opcode):\n \"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n\n def __str__(self):\n return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode2(Opcode):\n \"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 
'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def 
__str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Opcode(object):\n\n def __init__(self, mem, ptr, code, inc):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o._Opcode__par_modes\n\t\t[0, 1]\n\t\t\"\"\"\n if mem[ptr] % 100 != code:\n raise Exception('Creating Opcode%d for opcode %d' % (code, mem[\n ptr]))\n self.memory = mem\n self.ptr = ptr\n self.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr] /\n 100))]))\n self.__ptr_inc = inc\n\n def ptr_inc(self):\n return self.__ptr_inc\n\n def get_val(self, arg_idx):\n \"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n idx = arg_idx - 1\n if idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n return self.memory[self.memory[self.ptr + arg_idx]]\n elif self.__par_modes[idx] == 1:\n return self.memory[self.ptr + arg_idx]\n\n def set_ptr(self):\n return False, 0\n\n def reads(self):\n raise Exception('Call to base class reads()')\n\n def writes(self):\n raise Exception('Call to base class writes()')\n <mask token>\n\n def params(self):\n raise Exception('Call to base class params()')\n\n def run(self):\n raise Exception('Call to base class run()')\n\n\nclass Opcode1(Opcode):\n \"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 1, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first + self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '+'\n\n def __str__(self):\n return 'loc[%d] = %d + %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode2(Opcode):\n 
\"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 2, 4)\n self.__first = self.get_val(1)\n self.__second = self.get_val(2)\n self.__res = mem[ptr + 3]\n\n def run(self):\n self.memory[self.__res] = self.__first * self.__second\n return True\n\n def params(self):\n return {'noun': self.__first, 'verb': self.__second, 'result': self\n .__res}\n\n def reads(self):\n return [self.__first, self.__second]\n\n def writes(self):\n return self.__res\n\n def op(self):\n return '*'\n\n def __str__(self):\n return 'loc[%d] = %d * %d' % (self.__res, self.__first, self.__second)\n\n\nclass Opcode99(Opcode):\n \"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\n def __init__(self, mem, ptr):\n super().__init__(mem, ptr, 99, 1)\n\n def run(self):\n return False\n\n def params(self):\n return {}\n\n def reads(self):\n return []\n\n def writes(self):\n return None\n\n def op(self):\n return 'HALT'\n\n def __str__(self):\n return 'HALT'\n\n\n<mask token>\n\n\nclass Interpreter(object):\n\n def __init__(self, input_code, ops=default_ops()):\n self.__memory = input_code\n self.__ops = ops\n self.__ptr = 0\n self.__running = True\n self.length = len(self.__memory)\n\n def stepi(self):\n o = None\n if self.__running:\n o = self.next_op()\n self.__running = o.run()\n chk, val = o.set_ptr()\n if chk:\n self.__ptr = val\n else:\n self.__ptr += o.ptr_inc()\n return o\n\n def run(self):\n while self.__running:\n self.stepi()\n\n def inspect(self, loc):\n return self.__memory[loc]\n\n def next_op(self):\n return self.op_at(self.__ptr)\n\n def op_at(self, ptr):\n return self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n def __str__(self):\n strs = []\n for i, v in enumerate(self.__memory):\n if i == self.__ptr:\n strs.append('{:*>4}'.format(v))\n else:\n strs.append('{:>4}'.format(v))\n return ','.join(strs) + '\\n' + 
'Next:\\n\\t' + str(self.next_op())\n\n def poke(self, loc, val):\n self.__memory[loc] = val\n\n def rebind(self, code, call):\n self.__ops[code] = call\n\n def as_opcodes(self):\n ops = [self.op_at(0)]\n ptr = ops[-1].ptr_inc()\n while ops[-1].op() != 'HALT':\n ops.append(self.op_at(ptr))\n ptr += ops[-1].ptr_inc()\n return ops\n\n\nclass ValueNode(object):\n\n def __init__(self, val, tag=''):\n self.__val = val\n self.__tag = tag\n\n def __str__(self):\n return self.__tag + str(self.__val)\n\n\nclass OpNode(object):\n\n def __init__(self, op, depends):\n self.__op = op\n self.__depends = depends\n\n def __str__(self):\n return '(' + self.__op.op().join([str(i) for i in self.__depends]\n ) + ')'\n\n\nclass OpcodeTreeBuilder(object):\n\n def __init__(self, interp):\n self.__interpreter = interp\n self.__codes = interp.as_opcodes()\n\n def construct_mappings(self):\n for i in self.__codes:\n params = i.params()\n if 'result' in params.keys():\n if params['result'] not in self.__writes_to.keys():\n self.__writes_to[params['result']] = []\n self.__writes_to[params['result']].append(i)\n if 'noun' in params.keys():\n if params['noun'] not in self.__reads_from.keys():\n self.__reads_from[params['noun']] = []\n self.__reads_from[params['noun']].append(i)\n if 'verb' in params.keys():\n if params['verb'] not in self.__reads_from.keys():\n self.__reads_from[params['verb']] = []\n self.__reads_from[params['verb']].append(i)\n\n def construct_graph(self):\n op = self.__interpreter.op_at(0)\n reads = [ValueNode(self.__interpreter.inspect(i), tag='raw%d_' % i) for\n i in op.reads()]\n writes = op.writes()\n base = OpNode(op, reads)\n ptr = op.ptr_inc()\n last_write = {}\n if writes:\n last_write[writes] = base\n while op.op() != 'HALT':\n op = self.__interpreter.op_at(ptr)\n if op.op() == 'HALT':\n break\n depends = []\n for i in op.reads():\n if i in last_write.keys():\n depends.append(last_write[i])\n else:\n depends.append(ValueNode(self.__interpreter.inspect(i)))\n base = 
OpNode(op, depends)\n if op.writes():\n last_write[op.writes()] = base\n ptr += op.ptr_inc()\n return base\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python3\n\ndef file_to_code(fname):\n\tmem = []\n\tfor line in open(fname,\"r\"):\n\t\tmem.extend([int(i) for i in line.split(\",\")])\n\treturn mem\n\nclass Opcode(object):\n\tdef __init__(self, mem, ptr, code, inc):\n\t\t\"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o._Opcode__par_modes\n\t\t[0, 1]\n\t\t\"\"\"\n\t\tif mem[ptr]%100 != code:\n\t\t\traise Exception(\"Creating Opcode%d for opcode %d\"%(code, mem[ptr]))\n\t\tself.memory = mem\n\t\tself.ptr = ptr\n\t\tself.__par_modes = list(reversed([int(i) for i in str(int(mem[ptr]/100))]))\n\t\tself.__ptr_inc = inc\n\n\tdef ptr_inc(self):\n\t\treturn self.__ptr_inc\n\n\tdef get_val(self, arg_idx):\n\t\t\"\"\"\n\t\t>>> o = Opcode([1001, 2, 4, 1], 0, 1, 4)\n\t\t>>> o.get_val(1)\n\t\t4\n\t\t>>> o.get_val(2)\n\t\t4\n\t\t>>> o.get_val(3)\n\t\t2\n\t\t\"\"\"\n\t\tidx = arg_idx-1\n\t\tif idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n\t\t\treturn self.memory[self.memory[self.ptr+arg_idx]]\n\t\telif self.__par_modes[idx] == 1:\n\t\t\treturn self.memory[self.ptr + arg_idx]\n\n\tdef set_ptr(self):\n\t\treturn False,0\n\n\tdef reads(self):\n\t\traise Exception(\"Call to base class reads()\")\n\n\tdef writes(self):\n\t\traise Exception(\"Call to base class writes()\")\n\n\tdef op(self):\n\t\traise Exception(\"Call to base class op()\")\n\n\tdef params(self):\n\t\traise Exception(\"Call to base class params()\")\n\n\tdef run(self):\n\t\traise Exception(\"Call to base class run()\")\n\n\nclass Opcode1(Opcode):\n\t\"\"\"\n\t>>> o = Opcode1([101, 2, 1, 3], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[101, 2, 1, 4]\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 1, 4)\n\t\tself.__first = self.get_val(1)\n\t\tself.__second = self.get_val(2)\n\t\tself.__res = mem[ptr+3]\n\n\tdef run(self):\n\t\tself.memory[self.__res] = self.__first + self.__second\n\t\treturn True\n\n\tdef params(self):\n\t\treturn {'noun':self.__first, 'verb':self.__second, 
'result':self.__res}\n\n\tdef reads(self):\n\t\treturn [self.__first, self.__second]\n\n\tdef writes(self):\n\t\treturn self.__res\n\n\tdef op(self):\n\t\treturn \"+\"\n\n\tdef __str__(self):\n\t\treturn \"loc[%d] = %d + %d\"%(self.__res,self.__first,self.__second)\n\nclass Opcode2(Opcode):\n\t\"\"\"\n\t>>> o = Opcode2([2, 2, 3, 4, 99], 0)\n\t>>> o.run()\n\tTrue\n\t>>> o.memory\n\t[2, 2, 3, 4, 12]\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 2, 4)\n\t\tself.__first = self.get_val(1)\n\t\tself.__second = self.get_val(2)\n\t\tself.__res = mem[ptr+3]\n\n\tdef run(self):\n\t\tself.memory[self.__res] = self.__first * self.__second\n\t\treturn True\n\n\tdef params(self):\n\t\treturn {'noun':self.__first, 'verb':self.__second, 'result':self.__res}\n\n\tdef reads(self):\n\t\treturn [self.__first, self.__second]\n\n\tdef writes(self):\n\t\treturn self.__res\n\n\tdef op(self):\n\t\treturn \"*\"\n\n\tdef __str__(self):\n\t\treturn \"loc[%d] = %d * %d\"%(self.__res,self.__first,self.__second)\n\nclass Opcode99(Opcode):\n\t\"\"\"\n\t>>> o = Opcode99([99,12,3,4,5], 0)\n\t>>> o.run()\n\tFalse\n\t\"\"\"\n\tdef __init__(self, mem, ptr):\n\t\tsuper().__init__(mem, ptr, 99, 1)\n\n\tdef run(self):\n\t\treturn False\n\n\tdef params(self):\n\t\treturn {}\n\n\tdef reads(self):\n\t\treturn []\n\n\tdef writes(self):\n\t\treturn None\n\n\tdef op(self):\n\t\treturn \"HALT\"\n\n\tdef __str__(self):\n\t\treturn \"HALT\"\n\ndef default_ops():\n\treturn {1:Opcode1,2:Opcode2,99:Opcode99}\n\nclass Interpreter(object):\n\tdef __init__(self, input_code, ops=default_ops()):\n\t\tself.__memory = input_code\n\n\t\tself.__ops = ops\n\t\tself.__ptr = 0\n\t\tself.__running = True\n\t\tself.length = len(self.__memory)\n\n\tdef stepi(self):\n\t\to = None\n\t\tif self.__running:\n\t\t\to = self.next_op()\n\t\t\tself.__running = o.run()\n\t\t\tchk,val = o.set_ptr()\n\t\t\tif chk:\n\t\t\t\tself.__ptr = val\n\t\t\telse:\n\t\t\t\tself.__ptr += o.ptr_inc()\n\t\treturn o\n\n\tdef 
run(self):\n\t\twhile self.__running:\n\t\t\tself.stepi()\n\n\tdef inspect(self,loc):\n\t\treturn self.__memory[loc]\n\n\tdef next_op(self):\n\t\treturn self.op_at(self.__ptr)\n\n\tdef op_at(self, ptr):\n\t\treturn self.__ops[self.__memory[ptr] % 100](self.__memory, ptr)\n\n\tdef __str__(self):\n\t\tstrs = []\n\t\tfor i,v in enumerate(self.__memory):\n\t\t\tif i == self.__ptr:\n\t\t\t\tstrs.append(\"{:*>4}\".format(v))\n\t\t\telse:\n\t\t\t\tstrs.append(\"{:>4}\".format(v))\n\t\treturn \",\".join(strs) + \"\\n\" + \"Next:\\n\\t\" + str(self.next_op())\n\n\tdef poke(self,loc,val):\n\t\tself.__memory[loc] = val\n\n\tdef rebind(self,code,call):\n\t\tself.__ops[code] = call\n\n\tdef as_opcodes(self):\n\t\tops = [self.op_at(0)]\n\t\tptr = ops[-1].ptr_inc()\n\t\twhile ops[-1].op() != \"HALT\":\n\t\t\tops.append(self.op_at(ptr))\n\t\t\tptr += ops[-1].ptr_inc()\n\t\treturn ops\n\nclass ValueNode(object):\n\tdef __init__(self,val,tag=''):\n\t\tself.__val = val\n\t\tself.__tag = tag\n\n\tdef __str__(self):\n\t\treturn self.__tag + str(self.__val)\n\nclass OpNode(object):\n\tdef __init__(self,op,depends):\n\t\tself.__op = op\n\t\tself.__depends = depends\n\n\tdef __str__(self):\n\t\treturn \"(\" + self.__op.op().join([str(i) for i in self.__depends]) + \")\"\n\nclass OpcodeTreeBuilder(object):\n\tdef __init__(self, interp):\n\t\tself.__interpreter = interp\n\t\tself.__codes = interp.as_opcodes()\n\n\tdef construct_mappings(self):\n\t\tfor i in self.__codes:\n\t\t\tparams = i.params()\n\t\t\tif 'result' in params.keys():\n\t\t\t\tif params['result'] not in self.__writes_to.keys():\n\t\t\t\t\tself.__writes_to[params['result']] = []\n\t\t\t\tself.__writes_to[params['result']].append(i)\n\t\t\tif 'noun' in params.keys():\n\t\t\t\tif params['noun'] not in self.__reads_from.keys():\n\t\t\t\t\tself.__reads_from[params['noun']] = []\n\t\t\t\tself.__reads_from[params['noun']].append(i)\n\t\t\tif 'verb' in params.keys():\n\t\t\t\tif params['verb'] not in 
self.__reads_from.keys():\n\t\t\t\t\tself.__reads_from[params['verb']] = []\n\t\t\t\tself.__reads_from[params['verb']].append(i)\n\n\tdef construct_graph(self):\n\t\top = self.__interpreter.op_at(0)\n\t\treads = [ValueNode(self.__interpreter.inspect(i),tag=\"raw%d_\"%(i)) for i in op.reads()]\n\t\twrites = op.writes()\n\t\tbase = OpNode(op,reads)\n\t\tptr = op.ptr_inc()\n\t\tlast_write = {}\n\t\tif writes:\n\t\t\tlast_write[writes] = base\n\t\twhile op.op() != \"HALT\":\n\t\t\top = self.__interpreter.op_at(ptr)\n\t\t\tif op.op() == \"HALT\":\n\t\t\t\tbreak\n\t\t\tdepends = []\n\t\t\tfor i in op.reads():\n\t\t\t\tif i in last_write.keys():\n\t\t\t\t\tdepends.append(last_write[i])\n\t\t\t\telse:\n\t\t\t\t\tdepends.append(ValueNode(self.__interpreter.inspect(i)))\n\t\t\tbase = OpNode(op,depends)\n\t\t\tif op.writes():\n\t\t\t\tlast_write[op.writes()] = base\n\t\t\tptr += op.ptr_inc()\n\t\treturn base\n\nif __name__=='__main__':\n\timport doctest\n\tdoctest.testmod()\n\n#################################################\n\n#\ti = Interpreter(file_to_code(\"day2_input.txt\"))\n#\ti.run()\n#\ti.inspect(0)\n",
"step-ids": [
43,
44,
55,
57,
62
]
}
|
[
43,
44,
55,
57,
62
] |
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
    """
    Middleware that attaches request metadata (user, remote address and
    host) to ModelChange objects via the audit_presave signal.  This is
    independent of request logging and can be used separately.
    """

    def process_request(self, request, *args, **kwargs):
        if not settings.CHANGE_LOGGING:
            return

        user = getattr(request, 'user', None)
        if user and not user.is_authenticated():
            user = None

        # Collect the extra keyword arguments passed to the signal handler.
        extra = {}
        if user and isinstance(user, get_user_model()):
            extra['user'] = user
        remote_addr = request.META.get('REMOTE_ADDR')
        if remote_addr:
            extra['remote_addr'] = remote_addr
        remote_host = request.META.get('REMOTE_HOST')
        if remote_host:
            extra['remote_host'] = remote_host

        # Keep a strong reference on the request; its lifetime is sane here.
        request._handler_func = partial(self.pre_action_handler,
                                        update_kwargs=extra)
        signals.audit_presave.connect(
            request._handler_func,
            dispatch_uid=(settings.DISPATCH_UID, request),
        )

    def process_response(self, request, response):
        # Disconnect the per-request handler.  Runs even when change
        # logging is disabled, in case it was turned off after the signal
        # was connected for this request.
        signals.audit_presave.disconnect(
            dispatch_uid=(settings.DISPATCH_UID, request))
        return response

    def pre_action_handler(self, sender, model_instance, audit_meta,
                           update_kwargs=None, **kwargs):
        if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:
            audit_meta.update_additional_kwargs(update_kwargs)
|
normal
|
{
"blob_id": "0e03a3b3401075384e580bc2bb8af1a106f1d238",
"index": 2141,
"step-1": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n <mask token>\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-3": "<mask token>\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-4": "from __future__ import unicode_literals\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-5": "from __future__ import unicode_literals\n\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\n\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n\n user = getattr(request, 'user', None)\n\n if user and not user.is_authenticated():\n user = None\n\n # build kwargs to pass to the signal handler\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n\n # keep the strong ref on the request, its a sane lifetime\n request._handler_func = partial(self.pre_action_handler, update_kwargs=update_kwargs)\n\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(settings.DISPATCH_UID, request,),)\n\n def process_response(self, request, response):\n # disconnect signals for this request\n # runs even if change logging is disabled in case it was disabled after the signal was created\n signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request,))\n\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta, update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
def parse(num):
    """Convert a non-negative integer below one billion to English words.

    Fixes the original implementation, which spelled 10-19 with the units
    digit only, converted just the "millions" slice of the number, and
    discarded the resulting word list instead of returning it.

    Args:
        num: integer in the range [0, 10**9).

    Returns:
        The English spelling of ``num`` as a single space-separated string,
        e.g. ``parse(101)`` -> ``'one hundred and one'``.

    Raises:
        ValueError: if ``num`` is negative or >= 10**9.
    """
    units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
             'eight', 'nine']
    teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
             'sixteen', 'seventeen', 'eighteen', 'nineteen']
    tens = ['', '', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
            'seventy', 'eighty', 'ninety']

    def _three_digits(n):
        # Spell a chunk in the range [1, 999] as a list of words.
        words = []
        hundreds, rest = divmod(n, 100)
        if hundreds:
            words.extend([units[hundreds], 'hundred'])
            if rest:
                words.append('and')  # British style: "one hundred and one"
        if rest >= 20:
            t, u = divmod(rest, 10)
            words.append(tens[t])
            if u:
                words.append(units[u])
        elif rest >= 10:
            # 10-19 have their own names; never spell them digit by digit.
            words.append(teens[rest - 10])
        elif rest:
            words.append(units[rest])
        return words

    if num < 0 or num >= 10 ** 9:
        raise ValueError('num must be in the range [0, 10**9)')
    if num == 0:
        return 'zero'

    millions, below_million = divmod(num, 10 ** 6)
    thousands, remainder = divmod(below_million, 1000)

    words = []
    if millions:
        words.extend(_three_digits(millions))
        words.append('million')
    if thousands:
        words.extend(_three_digits(thousands))
        words.append('thousand')
    if remainder:
        words.extend(_three_digits(remainder))
    return ' '.join(words)


if __name__ == "__main__":
    print(parse(23456789))
|
normal
|
{
"blob_id": "843901b65a556e57470f73be2657e9fd3c0facc6",
"index": 9721,
"step-1": "<mask token>\n",
"step-2": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\n<mask token>\n",
"step-3": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\nif __name__ == '__main__':\n parse(23456789)\n",
"step-4": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum)-1, -1, -1):\n words.append(strnum[item])\n\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n\n units = ['zero','one','two','three','four','five','six','seven','eight','nine']\n tens = ['ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']\n tens_more = ['zero','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']\n\n reads = []\n if len(million)>0:\n if len(million)==3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n\n num = int(million[1])\n if num>1:\n reads.append(tens_more[num])\n if num!=0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n\n if len(million)==2:\n num = int(million[0])\n if num>1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num!=0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n \n if len(million)==1:\n num = int(million[0])\n reads.append(units[num])\n\n reads.append('million')\n reads.append('and')\n\nif __name__ == \"__main__\":\n parse(23456789)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics
=['accuracy'])
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(seed)
print(__doc__)
<|reserved_special_token_0|>
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics
=['accuracy'])
return model
<|reserved_special_token_0|>
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',
validation_data=(X1, Y1), callbacks=[checkpointer])
<|reserved_special_token_0|>
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
model.load_weights(filepath)
<|reserved_special_token_0|>
print(confusion_matrix(true_labels, predictions))
<|reserved_special_token_0|>
with open(json_file, 'w') as json_file:
json_file.write(model_json)
model.save_weights(h5_file)
print('Saved model to disk')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
seed = 7
np.random.seed(seed)
print(__doc__)
<|reserved_special_token_0|>
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2
nb_kernels = 3
nb_pools = 2
window_sizes = 19
dataset = np.loadtxt(trn_file, delimiter=',')
X = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)
Y = dataset[:, window_sizes * 20]
Y = np_utils.to_categorical(Y, nb_classes)
dataset1 = np.loadtxt(tst_file, delimiter=',')
X1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,
window_sizes)
Y1 = dataset1[:, window_sizes * 20]
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1, nb_classes)
def cnn_model():
    """Build and compile the CNN classifier.

    Architecture (legacy Keras 1.x API — `Convolution2D`, `dim_ordering`):
    three zero-padded conv blocks with 32, 64 and 128 filters of size
    ``nb_kernels`` x ``nb_kernels``, each followed by ReLU and max pooling,
    then Flatten -> Dropout(0.5) -> Dense(128, relu) -> Dense(nb_classes,
    softmax). Compiled with binary_crossentropy loss, the adadelta
    optimizer and an accuracy metric.

    Relies on the module-level globals ``window_sizes``, ``nb_kernels``,
    ``nb_pools`` and ``nb_classes``; input shape is (1, 20, window_sizes)
    in channels-first ('th') ordering.

    Returns:
        The compiled ``Sequential`` model.
    """
    model = Sequential()
    # Block 1: pad -> conv(32) -> relu -> pool.
    model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))
    model.add(Convolution2D(32, nb_kernels, nb_kernels))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
    # Block 2: pad -> conv(64, relu) -> pool.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
    # Block 3: pad -> conv(128, relu) -> pool.
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
    model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
    # Classifier head.
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # NOTE(review): binary_crossentropy with a 2-way softmax works but
    # categorical_crossentropy is the conventional pairing — confirm intent.
    model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics
        =['accuracy'])
    return model
model = cnn_model()
filepath = 'weights.best.hdf5'
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True, save_weights_only=True)
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',
validation_data=(X1, Y1), callbacks=[checkpointer])
scores = model.evaluate(X1, Y1)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
model_json = model.to_json()
with open(json_file, 'w') as json_file:
json_file.write(model_json)
model.save_weights(h5_file)
print('Saved model to disk')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.metrics import confusion_matrix
seed = 7
np.random.seed(seed)
print(__doc__)
import h5py
import os
import sys
from keras.models import model_from_json
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2
nb_kernels = 3
nb_pools = 2
window_sizes = 19
dataset = np.loadtxt(trn_file, delimiter=',')
X = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)
Y = dataset[:, window_sizes * 20]
Y = np_utils.to_categorical(Y, nb_classes)
dataset1 = np.loadtxt(tst_file, delimiter=',')
X1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,
window_sizes)
Y1 = dataset1[:, window_sizes * 20]
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1, nb_classes)
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics
=['accuracy'])
return model
model = cnn_model()
filepath = 'weights.best.hdf5'
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
save_best_only=True, save_weights_only=True)
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',
validation_data=(X1, Y1), callbacks=[checkpointer])
scores = model.evaluate(X1, Y1)
print('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
model_json = model.to_json()
with open(json_file, 'w') as json_file:
json_file.write(model_json)
model.save_weights(h5_file)
print('Saved model to disk')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 16:04:19 2018
@author: khanhle
"""
# Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.metrics import confusion_matrix
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
print(__doc__)
import h5py
import os
import sys
from keras.models import model_from_json
#define params
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2
nb_kernels = 3
nb_pools = 2
window_sizes = 19
# load training dataset
dataset = np.loadtxt(trn_file, delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)
Y = dataset[:,window_sizes*20]
Y = np_utils.to_categorical(Y,nb_classes)
#print X,Y
#nb_classes = Y.shape[1]
#print nb_classes
# load testing dataset
dataset1 = np.loadtxt(tst_file, delimiter=",")
# split into input (X) and output (Y) variables
X1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)
Y1 = dataset1[:,window_sizes*20]
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1,nb_classes)
#print('label : ', Y[i,:])
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
# model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
## add the model on top of the convolutional base
#model.add(top_model)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
#model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(nb_classes))
#model.add(BatchNormalization())
model.add(Activation('softmax'))
# f = open('model_summary.txt','w')
# f.write(str(model.summary()))
# f.close()
#model.compile(loss='categorical_crossentropy', optimizer='adadelta')
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
#plot_filters(model.layers[0],32,1)
# Fit the model
# save best weights
model = cnn_model()
#plot_model(model, to_file='model.png')
filepath = "weights.best.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
# balance data
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])
## evaluate the model
scores = model.evaluate(X1, Y1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
#serialize model to JSON
model_json = model.to_json()
with open(json_file, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(h5_file)
print("Saved model to disk")
|
flexible
|
{
"blob_id": "721f23d2b6109194b8bca54b1cd04263e30cdf24",
"index": 3964,
"step-1": "<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\n<mask token>\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\n<mask token>\nprint(confusion_matrix(true_labels, predictions))\n<mask token>\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-3": "<mask token>\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], 
scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-4": "<mask token>\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Activation\nfrom keras.utils import np_utils\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.core import Dropout, Flatten\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\nimport h5py\nimport os\nimport sys\nfrom keras.models import model_from_json\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n 
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 3 16:04:19 2018\r\n\r\n@author: khanhle\r\n\"\"\"\r\n\r\n\r\n\r\n# Create first network with Keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Activation\r\nfrom keras.utils import np_utils\r\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.layers.core import Dropout, Flatten\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport numpy as np\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# fix random seed for reproducibility\r\nseed = 7\r\nnp.random.seed(seed)\r\n\r\nprint(__doc__)\r\n\r\nimport h5py\r\nimport os\r\nimport sys\r\nfrom keras.models import model_from_json\r\n\r\n#define params\r\ntrn_file = sys.argv[1]\r\ntst_file = sys.argv[2]\r\njson_file = sys.argv[3]\r\nh5_file = sys.argv[4]\r\n\r\nnb_classes = 2\r\nnb_kernels = 3\r\nnb_pools = 2\r\nwindow_sizes = 19\r\n\r\n# load training dataset\r\ndataset = np.loadtxt(trn_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)\r\nY = dataset[:,window_sizes*20]\r\n\r\nY = np_utils.to_categorical(Y,nb_classes)\r\n#print X,Y\r\n#nb_classes = Y.shape[1]\r\n#print nb_classes\r\n\r\n# load testing dataset\r\ndataset1 = np.loadtxt(tst_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)\r\nY1 = dataset1[:,window_sizes*20]\r\ntrue_labels = np.asarray(Y1)\r\n\r\nY1 = np_utils.to_categorical(Y1,nb_classes)\r\n#print('label : ', Y[i,:])\r\n\r\ndef cnn_model():\r\n model = Sequential()\r\n\r\n model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))\r\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), 
dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n ## add the model on top of the convolutional base\r\n #model.add(top_model)\r\n model.add(Flatten())\r\n model.add(Dropout(0.5))\r\n model.add(Dense(128))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(nb_classes))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('softmax'))\r\n\r\n # f = open('model_summary.txt','w')\r\n # f.write(str(model.summary()))\r\n # f.close()\r\n\r\n #model.compile(loss='categorical_crossentropy', optimizer='adadelta')\r\n # Compile model\r\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])\r\n return model\r\n\r\n#plot_filters(model.layers[0],32,1)\r\n# Fit the model\r\n# save best weights\r\nmodel = cnn_model()\r\n#plot_model(model, to_file='model.png')\r\n\r\nfilepath = \"weights.best.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)\r\n# balance data\r\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])\r\n## evaluate the model\r\nscores = 
model.evaluate(X1, Y1)\r\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\r\n\r\nmodel.load_weights(filepath)\r\npredictions = model.predict_classes(X1)\r\n\r\nprint(confusion_matrix(true_labels, predictions))\r\n\r\n#serialize model to JSON\r\nmodel_json = model.to_json()\r\nwith open(json_file, \"w\") as json_file:\r\n json_file.write(model_json)\r\n# serialize weights to HDF5\r\nmodel.save_weights(h5_file)\r\nprint(\"Saved model to disk\")\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MyUserManager(BaseUserManager):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyUserManager(BaseUserManager):
<|reserved_special_token_0|>
def create_superuser(self, email, password, full_name, national_code,
mobile, address):
user = self.create_user(email, password, full_name, national_code,
mobile, address)
user.is_admin = True
user.save(using=self._db)
return user
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyUserManager(BaseUserManager):
def create_user(self, email, password, full_name, national_code, mobile,
address):
if not email:
raise ValueError('ایمیل الزامی است')
if not full_name:
raise ValueError('نام و نام خانوادگی الزامی است')
if not national_code:
raise ValueError('کدملی الزامی است')
if not mobile:
raise ValueError('موبایل الزامی است')
if not address:
raise ValueError('آدرس الزامی است')
user = self.model(email=self.normalize_email(email), full_name=
full_name, national_code=national_code, mobile=mobile, address=
address)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, full_name, national_code,
mobile, address):
user = self.create_user(email, password, full_name, national_code,
mobile, address)
user.is_admin = True
user.save(using=self._db)
return user
<|reserved_special_token_1|>
from django.contrib.auth.models import BaseUserManager
class MyUserManager(BaseUserManager):
def create_user(self, email, password, full_name, national_code, mobile,
address):
if not email:
raise ValueError('ایمیل الزامی است')
if not full_name:
raise ValueError('نام و نام خانوادگی الزامی است')
if not national_code:
raise ValueError('کدملی الزامی است')
if not mobile:
raise ValueError('موبایل الزامی است')
if not address:
raise ValueError('آدرس الزامی است')
user = self.model(email=self.normalize_email(email), full_name=
full_name, national_code=national_code, mobile=mobile, address=
address)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, full_name, national_code,
mobile, address):
user = self.create_user(email, password, full_name, national_code,
mobile, address)
user.is_admin = True
user.save(using=self._db)
return user
<|reserved_special_token_1|>
from django.contrib.auth.models import BaseUserManager
class MyUserManager(BaseUserManager):
    """Manager for the custom user model: builds regular and admin users."""

    def create_user(self, email, password, full_name, national_code, mobile, address):
        """Validate that every field is present, then create and save a user.

        Raises ValueError (with a Persian message) for the first missing
        field, in the order: email, full_name, national_code, mobile, address.
        """
        required_fields = (
            (email, 'ایمیل الزامی است'),
            (full_name, 'نام و نام خانوادگی الزامی است'),
            (national_code, 'کدملی الزامی است'),
            (mobile, 'موبایل الزامی است'),
            (address, 'آدرس الزامی است'),
        )
        for value, error_message in required_fields:
            if not value:
                raise ValueError(error_message)
        new_user = self.model(
            email=self.normalize_email(email),
            full_name=full_name,
            national_code=national_code,
            mobile=mobile,
            address=address,
        )
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password, full_name, national_code, mobile, address):
        """Create a normal user, then promote it to admin and re-save."""
        admin_user = self.create_user(email, password, full_name, national_code, mobile, address)
        admin_user.is_admin = True
        admin_user.save(using=self._db)
        return admin_user
|
flexible
|
{
"blob_id": "f5f14e4d114855b7eef555db182ee991bdf26c39",
"index": 8832,
"step-1": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n <mask token>\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-3": "<mask token>\n\n\nclass MyUserManager(BaseUserManager):\n\n def create_user(self, email, password, full_name, national_code, mobile,\n address):\n if not email:\n raise ValueError('ایمیل الزامی است')\n if not full_name:\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code:\n raise ValueError('کدملی الزامی است')\n if not mobile:\n raise ValueError('موبایل الزامی است')\n if not address:\n raise ValueError('آدرس الزامی است')\n user = self.model(email=self.normalize_email(email), full_name=\n full_name, national_code=national_code, mobile=mobile, address=\n address)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-4": "from django.contrib.auth.models import BaseUserManager\n\n\nclass MyUserManager(BaseUserManager):\n\n def create_user(self, email, password, full_name, national_code, mobile,\n address):\n if not email:\n raise ValueError('ایمیل الزامی است')\n if not full_name:\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code:\n raise ValueError('کدملی الزامی است')\n if not mobile:\n raise ValueError('موبایل الزامی است')\n if not address:\n raise ValueError('آدرس الزامی است')\n user = self.model(email=self.normalize_email(email), full_name=\n full_name, national_code=national_code, mobile=mobile, address=\n address)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password, full_name, national_code,\n mobile, address):\n user = self.create_user(email, password, full_name, national_code,\n mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n",
"step-5": "from django.contrib.auth.models import BaseUserManager\n\n\nclass MyUserManager(BaseUserManager):\n def create_user(self, email, password, full_name, national_code, mobile, address):\n if not email :\n raise ValueError('ایمیل الزامی است')\n if not full_name :\n raise ValueError('نام و نام خانوادگی الزامی است')\n if not national_code :\n raise ValueError('کدملی الزامی است')\n if not mobile :\n raise ValueError('موبایل الزامی است')\n if not address :\n raise ValueError('آدرس الزامی است')\n\n user = self.model(\n email = self.normalize_email(email) ,\n full_name = full_name ,\n national_code = national_code ,\n mobile = mobile ,\n address = address,\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n \n def create_superuser(self, email, password, full_name, national_code, mobile, address):\n user = self.create_user(email, password, full_name, national_code, mobile, address)\n user.is_admin = True\n user.save(using=self._db)\n return user\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import unittest, warnings
from pony.orm import *
from pony.orm import core
from pony.orm.tests.testutils import raises_exception
db = Database('sqlite', ':memory:')
class Person(db.Entity):
    """Minimal entity used to exercise Required/Optional empty-value checks."""
    id = PrimaryKey(int)  # explicit integer primary key (no auto-increment)
    name = Required(str)  # Required: '' or NULL in the DB is the condition under test
    tel = Optional(str)  # Optional: empty values here are always legal
# Map entities without verifying the tables exist yet -- the backing
# table is created by hand right below.
db.generate_mapping(check_tables=False)
# Create the Person table manually, bypassing Pony's DDL generation.
with db_session:
    db.execute("""
        create table Person(
            id int primary key,
            name text,
            tel text
        )
        """)
# Escalate all warnings to errors by default; individual tests override
# this per-category inside catch_warnings() blocks.
warnings.simplefilter('error', )
class TestValidate(unittest.TestCase):
    """Pony must report '' / NULL stored in a column mapped to Required.

    Each test inserts a row whose Required `name` column holds an incorrect
    empty value: the *a* tests silence the warning and read the raw value,
    while the *b* tests escalate the warning to an error and expect a raise.
    """

    @db_session
    def setUp(self):
        # Start every test from an empty table.
        db.execute('delete from Person')
        # Drop DatabaseContainsIncorrectEmptyValue entries from the module's
        # warning registry so a warning already emitted by an earlier test is
        # not suppressed by the "warn once" machinery in the next one.
        registry = getattr(core, '__warningregistry__', {})
        for key in list(registry):
            if type(key) is not tuple: continue
            text, category, lineno = key
            if category is DatabaseContainsIncorrectEmptyValue:
                del registry[key]

    @db_session
    def test_1a(self):
        # Empty string in a Required column, warning ignored: loads as ''.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)
            db.insert('Person', id=1, name='', tel='111')
            p = Person.get(id=1)
            self.assertEqual(p.name, '')

    @raises_exception(DatabaseContainsIncorrectEmptyValue,
        'Database contains empty string for required attribute Person.name')
    @db_session
    def test_1b(self):
        # Same empty string, but with the warning escalated it must raise.
        with warnings.catch_warnings():
            warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
            db.insert('Person', id=1, name='', tel='111')
            p = Person.get(id=1)

    @db_session
    def test_2a(self):
        # NULL in a Required column, warning ignored: attribute loads as None.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)
            db.insert('Person', id=1, name=None, tel='111')
            p = Person.get(id=1)
            self.assertEqual(p.name, None)

    @raises_exception(DatabaseContainsIncorrectEmptyValue,
        'Database contains NULL for required attribute Person.name')
    @db_session
    def test_2b(self):
        # NULL with the warning escalated to an error must raise.
        with warnings.catch_warnings():
            warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)
            db.insert('Person', id=1, name=None, tel='111')
            p = Person.get(id=1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "33c39b098cb9d3368b8f74a7433e0943fe252da5",
"index": 5672,
"step-1": "<mask token>\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\n<mask token>\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\ndb.generate_mapping(check_tables=False)\nwith db_session:\n db.execute(\n \"\"\"\n create table Person(\n id int primary key,\n name text,\n tel text\n )\n \"\"\"\n )\nwarnings.simplefilter('error')\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\ndb = Database('sqlite', ':memory:')\n\n\nclass Person(db.Entity):\n id = PrimaryKey(int)\n name = Required(str)\n tel = Optional(str)\n\n\ndb.generate_mapping(check_tables=False)\nwith db_session:\n db.execute(\n \"\"\"\n create table Person(\n id int primary key,\n name text,\n tel text\n )\n \"\"\"\n )\nwarnings.simplefilter('error')\n\n\nclass TestValidate(unittest.TestCase):\n\n @db_session\n def setUp(self):\n db.execute('delete from Person')\n registry = getattr(core, '__warningregistry__', {})\n for key in list(registry):\n if type(key) is not tuple:\n continue\n text, category, lineno = key\n if category is DatabaseContainsIncorrectEmptyValue:\n del registry[key]\n\n @db_session\n def test_1a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, '')\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains empty string for required attribute Person.name')\n @db_session\n def test_1b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name='', tel='111')\n p = Person.get(id=1)\n\n @db_session\n def test_2a(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue\n )\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n self.assertEqual(p.name, None)\n\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\n 'Database contains NULL for required attribute Person.name')\n @db_session\n def test_2b(self):\n with warnings.catch_warnings():\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\n db.insert('Person', id=1, name=None, tel='111')\n p = Person.get(id=1)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest, warnings\r\n\r\nfrom pony.orm import *\r\nfrom pony.orm import core\r\nfrom pony.orm.tests.testutils import raises_exception\r\n\r\ndb = Database('sqlite', ':memory:')\r\n\r\nclass Person(db.Entity):\r\n id = PrimaryKey(int)\r\n name = Required(str)\r\n tel = Optional(str)\r\n\r\ndb.generate_mapping(check_tables=False)\r\n\r\nwith db_session:\r\n db.execute(\"\"\"\r\n create table Person(\r\n id int primary key,\r\n name text,\r\n tel text\r\n )\r\n \"\"\")\r\n\r\nwarnings.simplefilter('error', )\r\n\r\n\r\nclass TestValidate(unittest.TestCase):\r\n\r\n @db_session\r\n def setUp(self):\r\n db.execute('delete from Person')\r\n registry = getattr(core, '__warningregistry__', {})\r\n for key in list(registry):\r\n if type(key) is not tuple: continue\r\n text, category, lineno = key\r\n if category is DatabaseContainsIncorrectEmptyValue:\r\n del registry[key]\r\n\r\n @db_session\r\n def test_1a(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name='', tel='111')\r\n p = Person.get(id=1)\r\n self.assertEqual(p.name, '')\r\n\r\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\r\n 'Database contains empty string for required attribute Person.name')\r\n @db_session\r\n def test_1b(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name='', tel='111')\r\n p = Person.get(id=1)\r\n\r\n @db_session\r\n def test_2a(self):\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name=None, tel='111')\r\n p = Person.get(id=1)\r\n self.assertEqual(p.name, None)\r\n\r\n @raises_exception(DatabaseContainsIncorrectEmptyValue,\r\n 'Database contains NULL for required attribute Person.name')\r\n @db_session\r\n def test_2b(self):\r\n with warnings.catch_warnings():\r\n 
warnings.simplefilter('error', DatabaseContainsIncorrectEmptyValue)\r\n db.insert('Person', id=1, name=None, tel='111')\r\n p = Person.get(id=1)\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
<|reserved_special_token_0|>
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
<|reserved_special_token_0|>
def predict(self, X):
return self._assign_clusters(X)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
<|reserved_special_token_0|>
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric=
'euclidean')
cluster_labels = np.argmin(cluster_distances, axis=1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KMeans:
def __init__(self, k=5, max_iters=100, random_seed=42):
self.k = k
self.max_iters = max_iters
np.random.seed(random_seed)
def _initialise_centroids(self, X):
random_indices = np.random.permutation(X.shape[0])
random_indices = random_indices[:self.k]
self.centroids = X[random_indices]
def _euclidien_distance(self, x):
return np.sum((x - self.centroids) ** 2, axis=1)
def _assign_clusters(self, X):
cluster_distances = pairwise_distances(X, self.centroids, metric=
'euclidean')
cluster_labels = np.argmin(cluster_distances, axis=1)
return cluster_labels
def _update_centroids(self, X, cluster_labels):
for cluster in range(self.k):
X_cluster = X[cluster_labels == cluster]
cluster_mean = np.mean(X_cluster, axis=0)
self.centroids[cluster] = cluster_mean
def fit(self, X):
self._initialise_centroids(X)
iterations = 0
while iterations <= self.max_iters:
iterations += 1
cluster_labels = self._assign_clusters(X)
self._update_centroids(X, cluster_labels)
def predict(self, X):
return self._assign_clusters(X)
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test = train_test_split(X, test_size=0.1)
model = KMeans(k=5)
model.fit(X_train)
y_pred = model.predict(X_test)
print(y_pred)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import math
from sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import pairwise_distances
class KMeans:
    """Naive k-means (Lloyd's algorithm) with random-point initialisation.

    Parameters
    ----------
    k : int
        Number of clusters.
    max_iters : int
        Upper bound on Lloyd iterations performed by ``fit``.
    random_seed : int
        Seed for the numpy RNG (affects centroid initialisation).
    """

    def __init__(self, k=5, max_iters=100, random_seed=42):
        self.k = k
        self.max_iters = max_iters
        # NOTE: seeds the *global* numpy RNG, as in the original design.
        np.random.seed(random_seed)

    def _initialise_centroids(self, X):
        """Pick k distinct data points, uniformly at random, as centroids."""
        random_indices = np.random.permutation(X.shape[0])[:self.k]
        # Fancy indexing copies, so later in-place centroid updates cannot
        # mutate the caller's data.
        self.centroids = X[random_indices]

    def _euclidien_distance(self, x):
        """Squared euclidean distance from x to every centroid.

        Kept for backward compatibility (unused internally; note the
        historical misspelling of "euclidean" in the name).
        """
        return np.sum((x - self.centroids) ** 2, axis=1)

    def _assign_clusters(self, X):
        """Return, for each row of X, the index of its nearest centroid."""
        # Pure-numpy pairwise *squared* distances: argmin over them equals
        # argmin over true euclidean distances, and this drops the sklearn
        # pairwise_distances dependency for a one-line computation.
        diffs = X[:, np.newaxis, :] - self.centroids[np.newaxis, :, :]
        cluster_distances = np.sum(diffs ** 2, axis=2)
        return np.argmin(cluster_distances, axis=1)

    def _update_centroids(self, X, cluster_labels):
        """Move every centroid to the mean of its assigned points."""
        for cluster in range(self.k):
            X_cluster = X[cluster_labels == cluster]
            if len(X_cluster) == 0:
                # Empty cluster: keep the previous centroid instead of
                # producing NaN (np.mean of an empty slice warns -> nan).
                continue
            self.centroids[cluster] = np.mean(X_cluster, axis=0)

    def fit(self, X):
        """Run Lloyd's algorithm on X until convergence or max_iters rounds.

        The original loop executed max_iters + 1 rounds unconditionally;
        this version performs at most max_iters and stops early once the
        label assignment is stable (centroids are then stable too).
        """
        self._initialise_centroids(X)
        previous_labels = None
        for _ in range(self.max_iters):
            cluster_labels = self._assign_clusters(X)
            if previous_labels is not None and np.array_equal(cluster_labels, previous_labels):
                break
            self._update_centroids(X, cluster_labels)
            previous_labels = cluster_labels

    def predict(self, X):
        """Assign each row of X to its nearest learned centroid."""
        return self._assign_clusters(X)
# Load data (labels y are never used -- clustering is unsupervised).
data = load_breast_cancer()
X, y = data.data, data.target
# NOTE(review): the split is unseeded, so the held-out 10% differs per run.
X_train, X_test = train_test_split(X, test_size = 0.1)
# Fit model on the training features only.
model = KMeans(k = 5)
model.fit(X_train)
# Predict: nearest-centroid cluster index for each held-out sample.
y_pred = model.predict(X_test)
print(y_pred)
|
flexible
|
{
"blob_id": "d267c8cbe51fb1bacc9404a1385f1daa4a0db7f2",
"index": 884,
"step-1": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n <mask token>\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n <mask token>\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass KMeans:\n\n def __init__(self, k=5, max_iters=100, random_seed=42):\n self.k = k\n self.max_iters = max_iters\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids) ** 2, axis=1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric=\n 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis=1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n X_cluster = X[cluster_labels == cluster]\n cluster_mean = np.mean(X_cluster, axis=0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n self._initialise_centroids(X)\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n cluster_labels = self._assign_clusters(X)\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size=0.1)\nmodel = KMeans(k=5)\nmodel.fit(X_train)\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import pairwise_distances\n\n\nclass KMeans():\n\n def __init__(self, k = 5, max_iters = 100, random_seed = 42):\n self.k = k\n self.max_iters = max_iters\n\n # Set random seed\n np.random.seed(random_seed)\n\n def _initialise_centroids(self, X):\n random_indices = np.random.permutation(X.shape[0])\n random_indices = random_indices[:self.k]\n self.centroids = X[random_indices]\n\n def _euclidien_distance(self, x):\n return np.sum((x - self.centroids)**2, axis = 1)\n\n def _assign_clusters(self, X):\n cluster_distances = pairwise_distances(X, self.centroids, metric = 'euclidean')\n cluster_labels = np.argmin(cluster_distances, axis = 1)\n return cluster_labels\n\n def _update_centroids(self, X, cluster_labels):\n for cluster in range(self.k):\n\n # Get all data points of a cluster\n X_cluster = X[cluster_labels == cluster]\n\n # Update the cluster's centroid\n cluster_mean = np.mean(X_cluster, axis = 0)\n self.centroids[cluster] = cluster_mean\n\n def fit(self, X):\n\n # Initialise random centroids\n self._initialise_centroids(X)\n\n iterations = 0\n while iterations <= self.max_iters:\n iterations += 1\n\n # Assign clusters to data\n cluster_labels = self._assign_clusters(X)\n\n # Update centroids\n self._update_centroids(X, cluster_labels)\n\n def predict(self, X):\n return self._assign_clusters(X)\n\n\n# Load data\ndata = load_breast_cancer()\nX, y = data.data, data.target\nX_train, X_test = train_test_split(X, test_size = 0.1)\n\n# Fit model\nmodel = KMeans(k = 5)\nmodel.fit(X_train)\n\n# Predict\ny_pred = model.predict(X_test)\nprint(y_pred)\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
'''
Each new term in the Fibonacci sequence is generated by adding the previous two terms.
By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million,
find the sum of the even-valued terms.
'''
def fib(n):
    """Return the n-th Fibonacci number (F_1 = F_2 = 1) exactly.

    Replaces the floating-point Binet formula, which starts returning
    wrong values around n ~ 71 because a 64-bit double cannot represent
    the powers of phi exactly. Plain integer iteration is exact for any
    n and keeps the same indexing (fib(0) == 0, fib(1) == 1).
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
# Project Euler #2: sum the even-valued Fibonacci terms not exceeding LIMIT.
total = 0
LIMIT = 4000000  # value cap; was named `max`, which shadowed the builtin
k = 2
while True:
    x = fib(k)
    if x > LIMIT:  # compare the Fibonacci *value* (not the index) to the cap
        break
    if x % 2 == 0:
        total += x
    k += 1
# Parenthesized call so this line is valid in both Python 2 and Python 3
# (the original `print total` statement was Python-2-only).
print(total)
|
normal
|
{
"blob_id": "02a1f84e72b412636d86b9bdb59856ae8c309255",
"index": 9373,
"step-1": "'''\nEach new term in the Fibonacci sequence is generated by adding the previous two terms. \nBy starting with 1 and 2, the first 10 terms will be:\n\n1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...\n\nBy considering the terms in the Fibonacci sequence whose values do not exceed four million, \nfind the sum of the even-valued terms.\n'''\ndef fib(n):\n '''\n Binet's formula for nth Fibonacci number\n http://mathworld.wolfram.com/BinetsFibonacciNumberFormula.html\n ((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5)) \n '''\n return int(0.4472135954999579392818347337462552470881236719223051448541*\n (pow(1.6180339887498948482045868343656381177203091798057628621354,n) - \n pow(-0.618033988749894848204586834365638117720309179805762862135,n)))\n \ntotal = 0\nmax = 4000000\nfor k in range(2, max):\n x = fib(k)\n if x > max:\n break\n if x % 2 == 0:\n total += x\nprint total\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
import itertools
from scipy.linalg import eig, schur
from eigen_rootfinding.polynomial import MultiCheb, MultiPower
from eigen_rootfinding.utils import memoize
from scipy.stats import ortho_group
def indexarray(matrix_terms, which, var):
    """Map monomials onto their positions after multiplication by x_var.

    Parameters
    ----------
    matrix_terms : 2d integer ndarray
        Ordered monomial basis; row i holds the exponent of every variable
        in the ith multivariate monomial.
    which : slice object
        Selects the rows of matrix_terms to be multiplied by x_var.
    var : int
        Variable to multiply by: x_0, ..., x_(dim-1).

    Returns
    -------
    arr : 1d integer ndarray
        For each selected monomial, the row index in matrix_terms of that
        monomial times x_var.
    """
    shifted = matrix_terms[which].copy()
    # Multiplying a monomial by x_var raises that variable's exponent by one.
    shifted[:, var] += 1
    # Locate each shifted monomial: the L1 distance to its own row is zero,
    # so argmin over the basis recovers the index.
    l1 = np.abs(shifted[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1)
    return np.argmin(l1, axis=1)
def indexarray_cheb(matrix_terms, which, var):
    """Map Chebyshev monomials onto their positions after multiplication by x_var.

    Uses the product rule
        T_1*T_0 = T_1
        T_1*T_n = .5(T_(n+1) + T_(n-1))

    Parameters
    ----------
    matrix_terms : 2d integer ndarray
        Ordered Chebyshev basis; row i holds the degree of each univariate
        Chebyshev factor in the ith multivariate monomial.
    which : slice object
        Selects the rows of matrix_terms to be multiplied by x_var.
    var : int
        Variable to multiply by: x_0, ..., x_(dim-1)

    Returns
    -------
    arr1 : 1d integer ndarray
        Indices of the T_(n+1) terms.
    arr2 : 1d
        Indices of the T_(n-1) terms.
    """
    raised = matrix_terms[which].copy()
    raised[:, var] += 1
    lowered = matrix_terms[which].copy()
    lowered[:, var] -= 1
    # Degree -1 only arises from T_0; fold it back to T_1 so that the two
    # halves of the product rule both point at T_1 (T_1*T_0 = T_1).
    lowered[lowered[:, var] == -1, var] += 2

    def _locate(targets):
        # Nearest-match lookup: L1 distance is zero at the exact basis row.
        dists = np.abs(targets[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1)
        return np.argmin(dists, axis=1)

    return _locate(raised), _locate(lowered)
def ms_matrices(E, Q, matrix_terms, dim):
    """Build monomial-basis Möller-Stetter matrices from a reduced Macaulay matrix.

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis.
    Q : (l, n) 2d ndarray
        Matrix whose columns give the quotient basis in terms of the monomial basis.
    matrix_terms : 2d ndarray
        Array with the ordered monomial basis.
    dim : int
        Number of variables.

    Returns
    -------
    M : (n, n, dim) ndarray
        Stack of n x n Möller-Stetter matrices; M[..., i] is the matrix for
        multiplication by x_i.
    """
    n = Q.shape[1]
    m = E.shape[0]
    # Rows of A line up with the rows of matrix_terms: the reduced part (-E)
    # first, then the quotient-basis part (Q).
    A = np.vstack((-E, Q))
    QH = Q.conj().T  # hoisted: the same conjugate-transpose serves every variable
    M = np.empty((n, n, dim), dtype=E.dtype)
    for var in range(dim):
        rows = indexarray(matrix_terms, slice(m, None), var)
        M[..., var] = QH @ A[rows]
    return M
def ms_matrices_cheb(E, Q, matrix_terms, dim):
    """Build Chebyshev-basis Möller-Stetter matrices from a reduced Macaulay matrix.

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis.
    Q : (l, n) 2d ndarray
        Matrix whose columns give the quotient basis in terms of the Chebyshev basis.
    matrix_terms : 2d ndarray
        Array with the ordered Chebyshev basis.
    dim : int
        Number of variables.

    Returns
    -------
    M : (n, n, dim) ndarray
        Stack of n x n Möller-Stetter matrices; M[..., i] is the matrix for
        multiplication by x_i.
    """
    n = Q.shape[1]
    m = E.shape[0]
    # Rows of A line up with the rows of matrix_terms: reduced part first.
    A = np.vstack((-E, Q))
    # The factor .5 comes from the Chebyshev product rule
    # T_1*T_n = .5(T_(n+1) + T_(n-1)); hoisted together with Q^H.
    half_QH = .5 * Q.T.conj()
    M = np.empty((n, n, dim), dtype=E.dtype)
    for var in range(dim):
        up, down = indexarray_cheb(matrix_terms, slice(m, None), var)
        M[..., var] = half_QH @ (A[up] + A[down])
    return M
def ms_matrices_p(E, P, matrix_terms, dim, cut):
    """Build power-basis Möller-Stetter matrices from a QRP-reduced Macaulay matrix.

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis.
    P : (, l) ndarray
        Pivot array returned by QR with pivoting, used to permute the columns.
    matrix_terms : 2d ndarray
        Ordered monomial basis. NOTE: rows past `cut` are permuted in place.
    dim : int
        Number of variables.
    cut : int
        Row index where the pivoted columns of matrix_terms begin.

    Returns
    -------
    M : (n, n, dim) ndarray
        Stack of n x n Möller-Stetter matrices; M[..., i] is the matrix for
        multiplication by x_i.
    """
    r, n = E.shape
    # Apply the QR column permutation so the trailing monomials line up with
    # the columns of E.
    matrix_terms[cut:] = matrix_terms[cut:][P]
    A = np.vstack((-E, np.eye(n)))
    M = np.empty((n, n, dim), dtype=E.dtype)
    for var in range(dim):
        rows = indexarray(matrix_terms, slice(r, None), var)
        M[..., var] = A[rows]
    return M
def ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):
    """Build Chebyshev-basis Möller-Stetter matrices from a QRP-reduced Macaulay matrix.

    Parameters
    ----------
    E : (m, k) ndarray
        Columns of the reduced Macaulay matrix corresponding to the quotient basis.
    P : (, l) ndarray
        Pivot array returned by QR with pivoting, used to permute the columns.
    matrix_terms : 2d ndarray
        Ordered Chebyshev basis. NOTE: rows past `cut` are permuted in place.
    dim : int
        Number of variables.
    cut : int
        Row index where the pivoted columns of matrix_terms begin.

    Returns
    -------
    M : (n, n, dim) ndarray
        Stack of n x n Möller-Stetter matrices; M[..., i] is the matrix for
        multiplication by x_i.
    """
    r, n = E.shape
    # Apply the QR column permutation so the trailing monomials line up with
    # the columns of E.
    matrix_terms[cut:] = matrix_terms[cut:][P]
    A = np.vstack((-E, np.eye(n)))
    M = np.empty((n, n, dim), dtype=E.dtype)
    for var in range(dim):
        up, down = indexarray_cheb(matrix_terms, slice(r, None), var)
        # .5 comes from the Chebyshev product rule T_1*T_n = .5(T_(n+1)+T_(n-1)).
        M[..., var] = (A[up] + A[down]) * .5
    return M
def sort_eigs(eigs, diag):
    """Sort the eigs array to match the order on the diagonal of the Schur
    factorization.

    Greedily pairs each eigenvalue with the closest still-unclaimed entry of
    diag (so no diagonal position is matched twice), then returns the
    permutation that lines eigs up with diag.

    Parameters
    ----------
    eigs : 1d ndarray
        Array of unsorted eigenvalues.
    diag : 1d complex ndarray
        Array containing the diagonal of the approximate Schur factorization.

    Returns
    -------
    w : 1d integer ndarray
        Permutation such that eigs[w] follows the order in diag.
    """
    available = list(range(diag.shape[0]))
    matched = []
    # Loop variable renamed from `eig`, which shadowed scipy.linalg.eig
    # imported at module level.
    for val in eigs:
        pos = available[np.argmin(np.abs(diag[available] - val))]
        matched.append(pos)
        available.remove(pos)
    return np.argsort(matched)
@memoize
def get_rand_combos_matrix(rows, cols, normal=False):
    """Generate a rows-by-cols random matrix, memoized across calls.

    With normal=False the result has orthogonal rows or columns (whichever
    is the smaller dimension); with normal=True the entries are drawn from
    the standard normal distribution N(0, 1).

    Parameters
    ----------
    rows : int
        Number of rows.
    cols : int
        Number of columns.
    normal : bool
        Optional. If True, return a standard-normal matrix instead of an
        orthogonal one.

    Returns
    -------
    C : (rows, cols) ndarray
        The random matrix described above.
    """
    # Fixed seed so repeated runs see the same "random" combination.
    np.random.seed(57)
    # NOTE(review): plain randn matrices showed conditioning problems, hence
    # the orthogonal default; other random ensembles may be worth exploring.
    if normal:
        return np.random.normal(loc=0, scale=1, size=(rows, cols))
    dim = max(rows, cols)
    ortho = ortho_group.rvs(dim)
    return ortho[:rows, :cols]
@memoize
def get_Q_c(dim):
    """Generate a once-chosen random rotation and linear combination.

    Memoized (and seeded) so the same orthogonal matrix and combination are
    reused by every simultaneous-eigenvalue computation of the same dimension.

    Parameters
    ----------
    dim : int
        Dimension of the system.

    Returns
    -------
    Q : (dim, dim) ndarray
        Random orthogonal rotation.
    c : (dim, ) ndarray
        Random linear combination.
    """
    # Fixed seed keeps the rotation reproducible across runs.
    np.random.seed(103)
    rotation = ortho_group.rvs(dim)
    combo = np.random.randn(dim)
    return rotation, combo
def msroots(M):
    """Compute the roots of a system from its Möller-Stetter matrices.

    Implicitly rotates the coordinate system by a fixed random orthogonal
    matrix to avoid repeated eigenvalues arising from special structure in
    the underlying polynomial system, and approximates the joint eigenvalue
    problem with a Schur factorization of a random linear combination of the
    matrices.

    Parameters
    ----------
    M : (n, n, dim) ndarray
        Stack of n x n Möller-Stetter matrices; M[..., i] is the matrix for
        multiplication by x_i.

    Returns
    -------
    roots : (n, dim) ndarray
        Approximate roots of the system, one root per row.
    """
    dim = M.shape[-1]
    Q, c = get_Q_c(dim)
    # Rotate coordinates: apply Q across the last axis of the stack.
    M = (Q @ M[..., np.newaxis])[..., 0]
    eigs = np.empty((dim, M.shape[0]), dtype='complex')
    # One unitary U, computed from a random linear combination, serves to
    # (approximately) triangularize every M[..., i].
    U = schur((M * c).sum(axis=-1), output='complex')[1]
    for i in range(dim):
        T = U.conj().T @ M[..., i] @ U
        w = eig(M[..., i], right=False)
        eigs[i] = w[sort_eigs(w, np.diag(T))]
    # Undo the rotation, then transpose so each row is one root.
    return (Q.T @ eigs).T
|
normal
|
{
"blob_id": "14fb6776ac30802edf43c43acbee64263c6bdd7b",
"index": 2777,
"step-1": "<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP 
method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. 
Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-2": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\n<mask token>\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient 
basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\n<mask token>\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the 
order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-3": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. 
those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : 
(n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = 
E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n<mask token>\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-4": "<mask token>\n\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]\n ).sum(axis=-1), axis=1)\n\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. 
those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var] == -1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).\n sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis])\n .sum(axis=-1), axis=1)\n return arr1, arr2\n\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m, None), i)\n M[..., i] = Q.conj().T @ A[arr]\n return M\n\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : 
(n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m, None), i)\n M[..., i] = 0.5 * Q.T.conj() @ (A[arr1] + A[arr2])\n return M\n\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r, None), i)\n M[..., i] = A[arr]\n return M\n\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = 
E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim), dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r, None), i)\n M[..., i] = 0.5 * (A[arr1] + A[arr2])\n return M\n\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst] - eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. 
Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n Q, c = get_Q_c(dim)\n M = (Q @ M[..., np.newaxis])[..., 0]\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n U = schur((M * c).sum(axis=-1), output='complex')[1]\n for i in range(0, dim):\n T = U.T.conj() @ M[..., i] @ U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n return (Q.T @ eigs).T\n",
"step-5": "import numpy as np\nimport itertools\nfrom scipy.linalg import eig, schur\nfrom eigen_rootfinding.polynomial import MultiCheb, MultiPower\nfrom eigen_rootfinding.utils import memoize\nfrom scipy.stats import ortho_group\n\ndef indexarray(matrix_terms, which, var):\n \"\"\"Compute the array mapping monomials under multiplication by x_var\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the exponent for each variable in the ith multivariate\n monomial\n which : slice object\n object to index into the matrix_terms for the monomials we want to multiply by var\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr : 1d integer ndarray\n Array containing the indices of the lower-degree monomials after multiplication\n by x_var\n \"\"\"\n mults = matrix_terms[which].copy()\n mults[:, var] += 1\n return np.argmin(np.abs(mults[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n\ndef indexarray_cheb(matrix_terms, which, var):\n \"\"\"Compute the array mapping Chebyshev monomials under multiplication by x_var:\n\n T_1*T_0 = T_1\n T_1*T_n = .5(T_(n+1)+ T_(n-1))\n\n Parameters\n ----------\n matrix_terms : 2d integer ndarray\n Array containing the monomials in order. matrix_terms[i] is the array\n containing the degree for each univariate Chebyshev monomial in the ith\n multivariate monomial\n m : int\n Number of monomials of highest degree, i.e. 
those that do not need to be\n multiplied\n var : int\n Variable to multiply by: x_0, ..., x_(dim-1)\n\n Returns\n -------\n arr1 : 1d integer ndarray\n Array containing the indices of T_(n+1)\n arr2 : 1d\n Array containing the indices of T_(n-1)\n \"\"\"\n up = matrix_terms[which].copy()\n up[:, var] += 1\n down = matrix_terms[which].copy()\n down[:, var] -= 1\n down[down[:, var]==-1, var] += 2\n arr1 = np.argmin(np.abs(up[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n arr2 = np.argmin(np.abs(down[:, np.newaxis] - matrix_terms[np.newaxis]).sum(axis=-1), axis=1)\n return arr1, arr2\n\ndef ms_matrices(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the monomial basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the monomial basis\n matrix_terms : 2d ndarray\n Array with ordered monomial basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(m,None), i)\n M[..., i] = Q.conj().T@A[arr]\n return M\n\ndef ms_matrices_cheb(E, Q, matrix_terms, dim):\n \"\"\"Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n Q : (l, n) 2d ndarray\n Matrix whose columns give the quotient basis in terms of the Chebyshev basis\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) 
ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n n = Q.shape[1]\n m = E.shape[0]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, Q))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(m,None), i)\n M[..., i] = .5*Q.T.conj()@(A[arr1]+A[arr2])\n return M\n\ndef ms_matrices_p(E, P, matrix_terms, dim, cut):\n \"\"\"Compute the Möller-Stetter matrices in the power basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr = indexarray(matrix_terms, slice(r,None), i)\n M[..., i] = A[arr]\n return M\n\ndef ms_matrices_p_cheb(E, P, matrix_terms, dim, cut):\n \"\"\" Compute the Möller-Stetter matrices in the Chebyshev basis from a\n reduced Macaulay matrix (QRP method)\n\n Parameters\n ----------\n E : (m, k) ndarray\n Columns of the reduced Macaulay matrix corresponding to the quotient basis\n P : (, l) ndarray\n Array of pivots returned in QR with pivoting, used to permute the columns.\n matrix_terms : 2d ndarray\n Array with ordered Chebyshev basis\n dim : int\n Number of variables\n\n Returns\n -------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n \"\"\"\n r, n = E.shape\n 
matrix_terms[cut:] = matrix_terms[cut:][P]\n M = np.empty((n, n, dim),dtype=E.dtype)\n A = np.vstack((-E, np.eye(n)))\n for i in range(dim):\n arr1, arr2 = indexarray_cheb(matrix_terms, slice(r,None), i)\n M[..., i] = .5*(A[arr1] + A[arr2])\n return M\n\ndef sort_eigs(eigs, diag):\n \"\"\"Sorts the eigs array to match the order on the diagonal\n of the Schur factorization\n\n Parameters\n ----------\n eigs : 1d ndarray\n Array of unsorted eigenvalues\n diag : 1d complex ndarray\n Array containing the diagonal of the approximate Schur factorization\n\n Returns\n -------\n w : 1d ndarray\n Eigenvalues from eigs sorted to match the order in diag\n \"\"\"\n n = diag.shape[0]\n lst = list(range(n))\n arr = []\n for eig in eigs:\n i = lst[np.argmin(np.abs(diag[lst]-eig))]\n arr.append(i)\n lst.remove(i)\n return np.argsort(arr)\n\n@memoize\ndef get_rand_combos_matrix(rows, cols, normal=False):\n \"\"\" Generates a rows by cols random matrix with orthogonal rows or columns,\n depending on if rows > cols or cols > rows.\n\n Parameters\n ----------\n rows : int\n Number of rows\n cols : int\n Number of columns\n normal : bool\n Optional. Whether or not to create a matrix using entries drawn\n from the standard normal distribution (N(0, 1)) or not. 
If it's\n False, it will return an orthogonal matrix.\n\n Returns\n -------\n C : (rows, cols) ndarray\n Matrix with orthgonal rows or columns, depending on if rows > cols or\n cols > rows if normal is False, otherwise a matrix with\n coefficients drawn from the standard normal (N(0, 1)).\n \"\"\"\n np.random.seed(57)\n # TODO perhaps explore different types of random matrices?\n # randn was giving me conditioning problems\n if normal:\n C = np.random.normal(loc=0, scale=1, size=(rows, cols))\n return C\n size = max(rows, cols)\n C = ortho_group.rvs(size)\n return C[:rows, :cols]\n\n@memoize\ndef get_Q_c(dim):\n \"\"\" Generates a once-chosen random orthogonal matrix and a random linear combination\n for use in the simultaneous eigenvalue compution.\n\n Parameters\n ----------\n dim : int\n Dimension of the system\n\n Returns\n -------\n Q : (dim, dim) ndarray\n Random orthogonal rotation\n c : (dim, ) ndarray\n Random linear combination\n \"\"\"\n np.random.seed(103)\n Q = ortho_group.rvs(dim)\n c = np.random.randn(dim)\n return Q, c\n\ndef msroots(M):\n \"\"\"Computes the roots to a system via the eigenvalues of the Möller-Stetter\n matrices. Implicitly performs a random rotation of the coordinate system\n to avoid repeated eigenvalues arising from special structure in the underlying\n polynomial system. 
Approximates the joint eigenvalue problem using a Schur\n factorization of a linear combination of the matrices.\n\n Parameters\n ----------\n M : (n, n, dim) ndarray\n Array containing the nxn Möller-Stetter matrices, where the matrix\n corresponding to multiplication by x_i is M[..., i]\n\n Returns\n -------\n roots : (n, dim) ndarray\n Array containing the approximate roots of the system, where each row\n is a root.\n \"\"\"\n dim = M.shape[-1]\n\n # perform a random rotation with a random orthogonal Q\n Q, c = get_Q_c(dim)\n M = (Q@M[..., np.newaxis])[..., 0]\n\n eigs = np.empty((dim, M.shape[0]), dtype='complex')\n # Compute the matrix U that triangularizes a random linear combination\n U = schur((M*c).sum(axis=-1), output='complex')[1]\n\n for i in range(0, dim):\n T = (U.T.conj())@(M[..., i])@U\n w = eig(M[..., i], right=False)\n arr = sort_eigs(w, np.diag(T))\n eigs[i] = w[arr]\n\n # Rotate back before returning, transposing to match expected shape\n return (Q.T@eigs).T\n",
"step-ids": [
5,
6,
8,
10,
12
]
}
|
[
5,
6,
8,
10,
12
] |
<|reserved_special_token_0|>
class DataParser:
<|reserved_special_token_0|>
def __init__(self, csvfile, data_centers):
"""DataParser constructor."""
self.accepted_records = []
self.ignored_records = []
with open(csvfile, 'r') as file:
reader = self.create_reader(file)
file.seek(0)
self.create_dataset(reader, data_centers)
def valid_value(self, number):
"""
Summary: Checks that value is a valid positive number.
Description: Accepts positive whole and decimal numbers.
"""
try:
float(number)
if float(number) > 0:
return True
return False
except ValueError:
return False
def create_reader(self, csvfile):
"""
Summary: Validates a csv file, returns a DictReader object.
Description: Takes one argument: "data" (Should be a csv file)
"""
file_dialect = Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
if not Sniffer().has_header(csvfile.read(1024)):
print('Imported csv file lacks header row')
exit()
csvfile.seek(0)
reader = DictReader(csvfile, dialect=file_dialect)
return reader
<|reserved_special_token_0|>
def get_dataset(self):
"""Getter for accepted_records list."""
return self.accepted_records
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataParser:
<|reserved_special_token_0|>
def __init__(self, csvfile, data_centers):
"""DataParser constructor."""
self.accepted_records = []
self.ignored_records = []
with open(csvfile, 'r') as file:
reader = self.create_reader(file)
file.seek(0)
self.create_dataset(reader, data_centers)
def valid_value(self, number):
"""
Summary: Checks that value is a valid positive number.
Description: Accepts positive whole and decimal numbers.
"""
try:
float(number)
if float(number) > 0:
return True
return False
except ValueError:
return False
def create_reader(self, csvfile):
"""
Summary: Validates a csv file, returns a DictReader object.
Description: Takes one argument: "data" (Should be a csv file)
"""
file_dialect = Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
if not Sniffer().has_header(csvfile.read(1024)):
print('Imported csv file lacks header row')
exit()
csvfile.seek(0)
reader = DictReader(csvfile, dialect=file_dialect)
return reader
def create_dataset(self, reader=None, data_centers=None):
"""
Summary: Creates a dataset of dcs and their respective times, values.
Arguments: 'reader' defines a reader object used to read a csv file.
'dataCenters' is a list containing data center names that are to be
graphed.
"""
for row in reader:
if row.get('DC') in data_centers:
if not self.valid_value(row.get('Value')):
self.ignored_records.append(row)
else:
self.accepted_records.append([row.get('DC'), float(row.
get('Time')), float(row.get('Value'))])
def get_dataset(self):
"""Getter for accepted_records list."""
return self.accepted_records
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataParser:
"""
Summary: parses a data file, and returns list of the filtered data.
Instances:
1. accepted_records
2. ignored_records
Methods:
1. valid_value
2. create_reader
3. create_dataset
4. get_dataset
"""
def __init__(self, csvfile, data_centers):
"""DataParser constructor."""
self.accepted_records = []
self.ignored_records = []
with open(csvfile, 'r') as file:
reader = self.create_reader(file)
file.seek(0)
self.create_dataset(reader, data_centers)
def valid_value(self, number):
"""
Summary: Checks that value is a valid positive number.
Description: Accepts positive whole and decimal numbers.
"""
try:
float(number)
if float(number) > 0:
return True
return False
except ValueError:
return False
def create_reader(self, csvfile):
"""
Summary: Validates a csv file, returns a DictReader object.
Description: Takes one argument: "data" (Should be a csv file)
"""
file_dialect = Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
if not Sniffer().has_header(csvfile.read(1024)):
print('Imported csv file lacks header row')
exit()
csvfile.seek(0)
reader = DictReader(csvfile, dialect=file_dialect)
return reader
def create_dataset(self, reader=None, data_centers=None):
"""
Summary: Creates a dataset of dcs and their respective times, values.
Arguments: 'reader' defines a reader object used to read a csv file.
'dataCenters' is a list containing data center names that are to be
graphed.
"""
for row in reader:
if row.get('DC') in data_centers:
if not self.valid_value(row.get('Value')):
self.ignored_records.append(row)
else:
self.accepted_records.append([row.get('DC'), float(row.
get('Time')), float(row.get('Value'))])
def get_dataset(self):
"""Getter for accepted_records list."""
return self.accepted_records
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sys import exit
from csv import Sniffer, DictReader
class DataParser:
"""
Summary: parses a data file, and returns list of the filtered data.
Instances:
1. accepted_records
2. ignored_records
Methods:
1. valid_value
2. create_reader
3. create_dataset
4. get_dataset
"""
def __init__(self, csvfile, data_centers):
"""DataParser constructor."""
self.accepted_records = []
self.ignored_records = []
with open(csvfile, 'r') as file:
reader = self.create_reader(file)
file.seek(0)
self.create_dataset(reader, data_centers)
def valid_value(self, number):
"""
Summary: Checks that value is a valid positive number.
Description: Accepts positive whole and decimal numbers.
"""
try:
float(number)
if float(number) > 0:
return True
return False
except ValueError:
return False
def create_reader(self, csvfile):
"""
Summary: Validates a csv file, returns a DictReader object.
Description: Takes one argument: "data" (Should be a csv file)
"""
file_dialect = Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
if not Sniffer().has_header(csvfile.read(1024)):
print('Imported csv file lacks header row')
exit()
csvfile.seek(0)
reader = DictReader(csvfile, dialect=file_dialect)
return reader
def create_dataset(self, reader=None, data_centers=None):
"""
Summary: Creates a dataset of dcs and their respective times, values.
Arguments: 'reader' defines a reader object used to read a csv file.
'dataCenters' is a list containing data center names that are to be
graphed.
"""
for row in reader:
if row.get('DC') in data_centers:
if not self.valid_value(row.get('Value')):
self.ignored_records.append(row)
else:
self.accepted_records.append([row.get('DC'), float(row.
get('Time')), float(row.get('Value'))])
def get_dataset(self):
"""Getter for accepted_records list."""
return self.accepted_records
<|reserved_special_token_1|>
"""
Program file: DataParser.py.
This program parses and returns a dataset for a plotting program
"""
from sys import exit
from csv import Sniffer, DictReader
class DataParser:
    """Read a CSV data file and collect records for the requested data centers.

    Rows whose 'Value' field is not a strictly positive number are set
    aside in ``ignored_records``; every other matching row lands in
    ``accepted_records`` as a ``[DC, Time, Value]`` list with the numeric
    fields converted to float.

    Attributes:
        accepted_records -- validated rows, as [DC, float(Time), float(Value)]
        ignored_records -- raw row dicts that failed value validation

    Methods:
        valid_value, create_reader, create_dataset, get_dataset
    """

    def __init__(self, csvfile, data_centers):
        """Open *csvfile*, build a dialect-aware reader, and populate the lists."""
        self.accepted_records = []
        self.ignored_records = []

        with open(csvfile, 'r') as handle:
            # Build a DictReader for the file (validates header presence).
            reader = self.create_reader(handle)
            # Rewind so the reader starts from the first line again.
            handle.seek(0)
            # Filter the rows down to the requested data centers.
            self.create_dataset(reader, data_centers)

    def valid_value(self, number):
        """Return True when *number* parses as a strictly positive float.

        Accepts whole and decimal numbers; rejects zero, negatives and
        non-numeric strings.
        """
        try:
            parsed = float(number)
        except ValueError:
            # Letters, symbols, empty strings, etc.
            return False
        return parsed > 0

    def create_reader(self, csvfile):
        """Sniff the dialect of *csvfile* and return a DictReader for it.

        Exits the program when the file lacks a header row, since the
        header names are required for later parsing.
        """
        # Determine the CSV dialect from an initial sample.
        file_dialect = Sniffer().sniff(csvfile.read(1024))
        csvfile.seek(0)
        # A header row is mandatory for DictReader field lookup.
        if not Sniffer().has_header(csvfile.read(1024)):
            print('Imported csv file lacks header row')
            exit()
        csvfile.seek(0)
        return DictReader(csvfile, dialect=file_dialect)

    def create_dataset(self, reader=None, data_centers=None):
        """Filter *reader* rows down to the data centers of interest.

        Arguments: *reader* yields dict-like rows with 'DC', 'Time' and
        'Value' keys; *data_centers* lists the data center names to keep.
        Rows from other data centers are dropped silently; rows with an
        invalid 'Value' are archived in ``ignored_records``.
        """
        for row in reader:
            if row.get('DC') not in data_centers:
                continue
            value = row.get('Value')
            if self.valid_value(value):
                self.accepted_records.append(
                    [row.get('DC'), float(row.get('Time')), float(value)]
                )
            else:
                # Keep the whole raw row for later analysis.
                self.ignored_records.append(row)

    def get_dataset(self):
        """Return the list of accepted records."""
        return self.accepted_records
|
flexible
|
{
"blob_id": "af609f1558276bab96477d3a2c61d813b9dd3d82",
"index": 9660,
"step-1": "<mask token>\n\n\nclass DataParser:\n <mask token>\n\n def __init__(self, csvfile, data_centers):\n \"\"\"DataParser constructor.\"\"\"\n self.accepted_records = []\n self.ignored_records = []\n with open(csvfile, 'r') as file:\n reader = self.create_reader(file)\n file.seek(0)\n self.create_dataset(reader, data_centers)\n\n def valid_value(self, number):\n \"\"\"\n Summary: Checks that value is a valid positive number.\n\n Description: Accepts positive whole and decimal numbers.\n \"\"\"\n try:\n float(number)\n if float(number) > 0:\n return True\n return False\n except ValueError:\n return False\n\n def create_reader(self, csvfile):\n \"\"\"\n Summary: Validates a csv file, returns a DictReader object.\n\n Description: Takes one argument: \"data\" (Should be a csv file)\n \"\"\"\n file_dialect = Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n if not Sniffer().has_header(csvfile.read(1024)):\n print('Imported csv file lacks header row')\n exit()\n csvfile.seek(0)\n reader = DictReader(csvfile, dialect=file_dialect)\n return reader\n <mask token>\n\n def get_dataset(self):\n \"\"\"Getter for accepted_records list.\"\"\"\n return self.accepted_records\n",
"step-2": "<mask token>\n\n\nclass DataParser:\n <mask token>\n\n def __init__(self, csvfile, data_centers):\n \"\"\"DataParser constructor.\"\"\"\n self.accepted_records = []\n self.ignored_records = []\n with open(csvfile, 'r') as file:\n reader = self.create_reader(file)\n file.seek(0)\n self.create_dataset(reader, data_centers)\n\n def valid_value(self, number):\n \"\"\"\n Summary: Checks that value is a valid positive number.\n\n Description: Accepts positive whole and decimal numbers.\n \"\"\"\n try:\n float(number)\n if float(number) > 0:\n return True\n return False\n except ValueError:\n return False\n\n def create_reader(self, csvfile):\n \"\"\"\n Summary: Validates a csv file, returns a DictReader object.\n\n Description: Takes one argument: \"data\" (Should be a csv file)\n \"\"\"\n file_dialect = Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n if not Sniffer().has_header(csvfile.read(1024)):\n print('Imported csv file lacks header row')\n exit()\n csvfile.seek(0)\n reader = DictReader(csvfile, dialect=file_dialect)\n return reader\n\n def create_dataset(self, reader=None, data_centers=None):\n \"\"\"\n Summary: Creates a dataset of dcs and their respective times, values.\n\n Arguments: 'reader' defines a reader object used to read a csv file.\n 'dataCenters' is a list containing data center names that are to be\n graphed.\n \"\"\"\n for row in reader:\n if row.get('DC') in data_centers:\n if not self.valid_value(row.get('Value')):\n self.ignored_records.append(row)\n else:\n self.accepted_records.append([row.get('DC'), float(row.\n get('Time')), float(row.get('Value'))])\n\n def get_dataset(self):\n \"\"\"Getter for accepted_records list.\"\"\"\n return self.accepted_records\n",
"step-3": "<mask token>\n\n\nclass DataParser:\n \"\"\"\n Summary: parses a data file, and returns list of the filtered data.\n\n Instances:\n 1. accepted_records\n 2. ignored_records\n\n Methods:\n 1. valid_value\n 2. create_reader\n 3. create_dataset\n 4. get_dataset\n \"\"\"\n\n def __init__(self, csvfile, data_centers):\n \"\"\"DataParser constructor.\"\"\"\n self.accepted_records = []\n self.ignored_records = []\n with open(csvfile, 'r') as file:\n reader = self.create_reader(file)\n file.seek(0)\n self.create_dataset(reader, data_centers)\n\n def valid_value(self, number):\n \"\"\"\n Summary: Checks that value is a valid positive number.\n\n Description: Accepts positive whole and decimal numbers.\n \"\"\"\n try:\n float(number)\n if float(number) > 0:\n return True\n return False\n except ValueError:\n return False\n\n def create_reader(self, csvfile):\n \"\"\"\n Summary: Validates a csv file, returns a DictReader object.\n\n Description: Takes one argument: \"data\" (Should be a csv file)\n \"\"\"\n file_dialect = Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n if not Sniffer().has_header(csvfile.read(1024)):\n print('Imported csv file lacks header row')\n exit()\n csvfile.seek(0)\n reader = DictReader(csvfile, dialect=file_dialect)\n return reader\n\n def create_dataset(self, reader=None, data_centers=None):\n \"\"\"\n Summary: Creates a dataset of dcs and their respective times, values.\n\n Arguments: 'reader' defines a reader object used to read a csv file.\n 'dataCenters' is a list containing data center names that are to be\n graphed.\n \"\"\"\n for row in reader:\n if row.get('DC') in data_centers:\n if not self.valid_value(row.get('Value')):\n self.ignored_records.append(row)\n else:\n self.accepted_records.append([row.get('DC'), float(row.\n get('Time')), float(row.get('Value'))])\n\n def get_dataset(self):\n \"\"\"Getter for accepted_records list.\"\"\"\n return self.accepted_records\n",
"step-4": "<mask token>\nfrom sys import exit\nfrom csv import Sniffer, DictReader\n\n\nclass DataParser:\n \"\"\"\n Summary: parses a data file, and returns list of the filtered data.\n\n Instances:\n 1. accepted_records\n 2. ignored_records\n\n Methods:\n 1. valid_value\n 2. create_reader\n 3. create_dataset\n 4. get_dataset\n \"\"\"\n\n def __init__(self, csvfile, data_centers):\n \"\"\"DataParser constructor.\"\"\"\n self.accepted_records = []\n self.ignored_records = []\n with open(csvfile, 'r') as file:\n reader = self.create_reader(file)\n file.seek(0)\n self.create_dataset(reader, data_centers)\n\n def valid_value(self, number):\n \"\"\"\n Summary: Checks that value is a valid positive number.\n\n Description: Accepts positive whole and decimal numbers.\n \"\"\"\n try:\n float(number)\n if float(number) > 0:\n return True\n return False\n except ValueError:\n return False\n\n def create_reader(self, csvfile):\n \"\"\"\n Summary: Validates a csv file, returns a DictReader object.\n\n Description: Takes one argument: \"data\" (Should be a csv file)\n \"\"\"\n file_dialect = Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n if not Sniffer().has_header(csvfile.read(1024)):\n print('Imported csv file lacks header row')\n exit()\n csvfile.seek(0)\n reader = DictReader(csvfile, dialect=file_dialect)\n return reader\n\n def create_dataset(self, reader=None, data_centers=None):\n \"\"\"\n Summary: Creates a dataset of dcs and their respective times, values.\n\n Arguments: 'reader' defines a reader object used to read a csv file.\n 'dataCenters' is a list containing data center names that are to be\n graphed.\n \"\"\"\n for row in reader:\n if row.get('DC') in data_centers:\n if not self.valid_value(row.get('Value')):\n self.ignored_records.append(row)\n else:\n self.accepted_records.append([row.get('DC'), float(row.\n get('Time')), float(row.get('Value'))])\n\n def get_dataset(self):\n \"\"\"Getter for accepted_records list.\"\"\"\n return 
self.accepted_records\n",
"step-5": "\"\"\"\nProgram file: DataParser.py.\n\nThis program parses and returns a dataset for a plotting program\n\"\"\"\n\nfrom sys import exit\nfrom csv import Sniffer, DictReader\n\n\nclass DataParser:\n \"\"\"\n Summary: parses a data file, and returns list of the filtered data.\n\n Instances:\n 1. accepted_records\n 2. ignored_records\n\n Methods:\n 1. valid_value\n 2. create_reader\n 3. create_dataset\n 4. get_dataset\n \"\"\"\n\n def __init__(self, csvfile, data_centers):\n \"\"\"DataParser constructor.\"\"\"\n self.accepted_records = []\n self.ignored_records = []\n\n with open(csvfile, 'r') as file:\n # Creates a reader object for later data manipulation\n reader = self.create_reader(file)\n\n # Resetting read/write pointer to beginning of file\n file.seek(0)\n\n # Creating list for graphing data center's dataset\n self.create_dataset(reader, data_centers)\n\n def valid_value(self, number):\n \"\"\"\n Summary: Checks that value is a valid positive number.\n\n Description: Accepts positive whole and decimal numbers.\n \"\"\"\n try:\n # Checking that entered value can be converted to a float.\n # Excludes letters and symbols.\n float(number)\n\n # Checking that validated number is nonnegative.\n if float(number) > 0:\n return True\n return False\n except ValueError:\n return False\n\n def create_reader(self, csvfile):\n \"\"\"\n Summary: Validates a csv file, returns a DictReader object.\n\n Description: Takes one argument: \"data\" (Should be a csv file)\n \"\"\"\n # Determines the dialect of the csv file for processing\n file_dialect = Sniffer().sniff(csvfile.read(1024))\n\n # Resets the read/write pointer within the file\n csvfile.seek(0)\n\n # Checks to see that the csv file imported has a header row,\n # that will be used for later parsing.\n if not Sniffer().has_header(csvfile.read(1024)):\n print('Imported csv file lacks header row')\n exit()\n\n # Resets the read/write pointer within the file\n csvfile.seek(0)\n\n # Creates a DictReader object 
with the csvfile provided, and the\n # dialect object to define the parameters of the reader instance.\n reader = DictReader(csvfile, dialect=file_dialect)\n\n # Return DictReader object\n return reader\n\n def create_dataset(self, reader=None, data_centers=None):\n \"\"\"\n Summary: Creates a dataset of dcs and their respective times, values.\n\n Arguments: 'reader' defines a reader object used to read a csv file.\n 'dataCenters' is a list containing data center names that are to be\n graphed.\n \"\"\"\n for row in reader:\n # Checking that the 'DC' matches one defined in \"data_centers\" list\n if row.get('DC') in data_centers:\n # Validating DC's value is a positive nonnegative number.\n if not self.valid_value(row.get('Value')):\n # Archiving ignored records for later analysis\n self.ignored_records.append(row)\n else:\n self.accepted_records.append(\n [\n row.get('DC'),\n float(row.get('Time')),\n float(row.get('Value'))\n ]\n )\n\n def get_dataset(self):\n \"\"\"Getter for accepted_records list.\"\"\"\n return self.accepted_records\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 13:34:19 2020

@author: ShihaoYang

Loads the pyltp (LTP, Language Technology Platform) models used below for
Chinese word segmentation, POS tagging and dependency parsing.
"""
from pyltp import SentenceSplitter
from pyltp import Segmentor
from pyltp import Postagger
from pyltp import Parser
from pyltp import NamedEntityRecognizer
import os
import jieba
import re
os.getcwd()
# Work relative to the directory that holds the LTP model data.
os.chdir('/Users/emilywang/shihao yang')
os.getcwd()
# Root directory of the ltp_data_v3.4.0 model files.
LTP_DATA_DIR='/Users/emilywang/shihao yang/ltp_data_v3.4.0/'
cws_model_path=os.path.join(LTP_DATA_DIR,'cws.model')  # word-segmentation model path
segmentor=Segmentor()  # create the segmenter instance
segmentor.load(cws_model_path)  # load the segmentation model
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')  # POS-tagging model path (`pos.model`)
postagger = Postagger()  # create the tagger instance
postagger.load(pos_model_path)  # load the model
par_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')  # dependency-parser model path (`parser.model`)
parser = Parser()  # create the parser instance
parser.load(par_model_path)  # load the model
class Sentence(object):
    """Extract the main subject-predicate-object skeleton from Chinese text.

    Relies on the module-level pyltp models (``segmentor``, ``postagger``,
    ``parser``) loaded above.  ``self.data`` accumulates, per noun subject,
    the attribute (ATT) modifier words seen while scanning clauses.
    """
    def __init__(self,text):
        """Store the raw input text to be analysed."""
        self.text = text
        # noun subject -> list of its attribute-modifier words
        self.data = dict()
    def SentS(self):
        """Split the stored text into sentences using LTP's splitter."""
        sents = SentenceSplitter.split(self.text) # sentence segmentation
        return sents
    def getLTPAnalysis(self, sentence):
        """Segment, POS-tag and dependency-parse one sentence.

        Returns (words, postags, rely_id, relation), where ``rely_id[i]`` is
        the 1-based index of word i's dependency head (0 means root) and
        ``relation[i]`` is its dependency label (e.g. 'SBV', 'VOB').
        """
        words=segmentor.segment(sentence)
        #print('\t'.join(words))
        postags = postagger.postag(words) # part-of-speech tagging
        #print('\t'.join(postags))
        arcs = parser.parse(words, postags) # dependency parsing
        rely_id = [arc.head for arc in arcs] # 1-based head index per word (0 = root)
        relation = [arc.relation for arc in arcs] # dependency relation label per word
        #heads = ['Root' if id == 0 else words[id-1] for id in rely_id] # map head ids back to words
        #for i in range(len(words)):
            #print(relation[i] + '(' + words[i] + ', ' + heads[i] + ')')
        return words,postags,rely_id,relation
    def getWord(self, words,postags,rely_id,relation,_id,wType):
        """Return the index of the first word attached to word ``_id`` by
        dependency type ``wType``, or None when no such word exists."""
        sbv = None
        for i in range(len(words)):
            if relation[i] == wType and rely_id[i] == (_id)+1: # rely_id is 1-based
                return i
        return sbv
    def getpron(self,words,postags,rely_id,relation,_id):
        """Follow a prepositional adverbial of word ``_id`` to its object.

        Looks for a preposition ('p') attached as ADV, then its POB object,
        preferring that object's own VOB dependent when present.  Returns a
        word index or None.
        """
        flag = None
        for i in range(len(words)):
            if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i] == (_id)+1:
                flag = i
                break
        if flag == None:
            return None
        pob = None
        vob_of_pob = None
        pob = self.getWord(words,postags,rely_id,relation,flag,'POB') # object of the preposition
        if pob:
            vob_of_pob = self.getWord(words,postags,rely_id,relation,pob,'VOB')
            if vob_of_pob:
                return vob_of_pob
            else:
                return pob
        return None
    def getatt_of_sbv(self,words,postags,rely_id,relation,_id):
        """Return the index of an adjective/noun ATT modifier of word
        ``_id``, or None."""
        for i in range(len(words)):
            if relation[i] == 'ATT' and rely_id[i] == (_id)+1 and (postags[i]=='a' or postags[i]=='n'):
                return i
        return None
    def getFirstNotNone(self, array):
        """Return the first element of ``array`` that is not None, else None."""
        for word in array:
            if word is not None:
                return word
        return None
    def getMainsent(self,realsbv,sentence):
        """Extract the main clause of one ``sentence``.

        ``realsbv`` is the subject carried over from a previous clause; it is
        reused when this clause has no subject or a pronoun subject.
        Returns (subject, "subject predicate object") or (None, None) when no
        dependency root or subject can be determined.
        NOTE(review): the local name ``re`` shadows the ``re`` module inside
        this method (harmless here, since the module is not used below).
        """
        re = ''
        words,postags,rely_id,relation = self.getLTPAnalysis(sentence)
        #hed = self.getHED(array)
        if 0 not in rely_id:
            # no dependency root -> clause cannot be analysed
            return None,None
        hed = rely_id.index(0)
        sbv = self.getWord(words,postags,rely_id,relation,hed, 'SBV') # subject
        vob = self.getWord(words,postags,rely_id,relation,hed, 'VOB') # object
        fob = self.getWord(words,postags,rely_id,relation,hed, 'FOB') # FOB object
        ###############
        # Resolve the effective subject: fall back to the carried-over one
        # when missing, or when the parsed subject is a pronoun ('r').
        if sbv == None:
            reals = realsbv
        elif postags[sbv] == 'r' and realsbv != None:
            reals = realsbv
        else:
            reals = words[sbv]
        if reals == None:
            return None,None
        # Remember attribute modifiers of noun subjects in self.data.
        if sbv != None and postags[sbv] == 'n':
            temp = self.getatt_of_sbv(words,postags,rely_id,relation,sbv)
            if temp != None:
                if words[sbv] not in self.data.keys():
                    self.data[words[sbv]] = [words[temp]]
                else:
                    self.data[words[sbv]].append(words[temp])
        # Append a coordinated (COO) subject when present.
        if sbv != None:
            sbvcoo = self.getWord(words,postags,rely_id,relation,sbv, 'COO')
            if sbvcoo != None:
                reals += words[sbvcoo]
        ###############
        # Adjectival predicate: try a prepositional complement first,
        # otherwise fold an attribute modifier into the subject.
        if postags[hed] == 'a':
            temp = self.getpron(words,postags,rely_id,relation,hed)
            if temp!= None:
                re = '{} {} {}'.format(reals, words[hed], words[temp])
            elif sbv != None:
                temp = self.getatt_of_sbv(words,postags,rely_id,relation,sbv)
                if temp != None:
                    re = '{} {} {}'.format(words[temp] + reals, words[hed], '')
                else:
                    re = '{} {} {}'.format(reals, words[hed], '')
            return reals,re
        # Verbal predicate: take the first available object (VOB before FOB)
        # and extend it with its own VOB dependent when one exists.
        finalvob = self.getFirstNotNone([vob, fob])
        if finalvob != None:
            temp = self.getWord(words,postags,rely_id,relation,finalvob, 'VOB')
            if temp != None:
                re = '{} {} {}'.format(reals, words[hed], words[finalvob] + words[temp])
            else:
                re = '{} {} {}'.format(reals, words[hed], words[finalvob])
        else:
            re = '{} {} {}'.format(reals, words[hed], '')
        return reals,re
    def getMain(self,sentence):
        """Split ``sentence`` into clauses and print each clause's main
        "subject predicate object" line, carrying the subject forward."""
        sentence = re.sub(' ','。',sentence) # treat spaces as clause breaks
        sentence = re.sub(',','。',sentence) # treat ASCII commas as clause breaks
        sents = SentenceSplitter.split(sentence)
        reals = None
        for s in sents:
            reals,res = self.getMainsent(reals,s)
            if res != None:
                print(res)
    def gettextmain(self):
        """Analyse every sentence of the stored text."""
        sents = self.SentS()
        for s in sents:
            self.getMain(s)
# Smoke test on a short two-sentence text.
s = Sentence('陈欣婕今天真好看。她今天中午吃炸鸡')
s.gettextmain()
def readfile():
    """Read the beef.txt corpus and normalize its whitespace and markup.

    Returns:
        The file contents as a single string with bracketed reference
        markers removed, whitespace collapsed, and hyphens stripped.
    """
    # `with` guarantees the file is closed even if read() raises.
    with open('/users/emilywang/shihao yang/beef.txt') as fn:
        string_data = fn.read()
    # Remove square-bracketed reference markers (e.g. "[12]").
    string_data = re.sub(r'\[[0-9]*\]', ' ', string_data)
    # Collapse any run of whitespace into a single space.
    string_data = re.sub(r'\s+', ' ', string_data)
    # Drop hyphen characters entirely.
    string_data = re.sub('-', '', string_data)
    return string_data
# REPL-style driver: the bare `s.data` expressions only display output in
# an interactive session; run as a script they are no-ops.
string = readfile()
s = Sentence(readfile())
s.gettextmain()
s.data
string = readfile()
s = Sentence('苹果和香蕉都是水果')
s.gettextmain()
s.data
|
normal
|
{
"blob_id": "dc41c64d09e5fdd0e234f516eeec0cbd2433876c",
"index": 8507,
"step-1": "<mask token>\n\n\nclass Sentence(object):\n\n def __init__(self, text):\n self.text = text\n self.data = dict()\n\n def SentS(self):\n sents = SentenceSplitter.split(self.text)\n return sents\n\n def getLTPAnalysis(self, sentence):\n words = segmentor.segment(sentence)\n postags = postagger.postag(words)\n arcs = parser.parse(words, postags)\n rely_id = [arc.head for arc in arcs]\n relation = [arc.relation for arc in arcs]\n return words, postags, rely_id, relation\n\n def getWord(self, words, postags, rely_id, relation, _id, wType):\n sbv = None\n for i in range(len(words)):\n if relation[i] == wType and rely_id[i] == _id + 1:\n return i\n return sbv\n\n def getpron(self, words, postags, rely_id, relation, _id):\n flag = None\n for i in range(len(words)):\n if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i\n ] == _id + 1:\n flag = i\n break\n if flag == None:\n return None\n pob = None\n vob_of_pob = None\n pob = self.getWord(words, postags, rely_id, relation, flag, 'POB')\n if pob:\n vob_of_pob = self.getWord(words, postags, rely_id, relation,\n pob, 'VOB')\n if vob_of_pob:\n return vob_of_pob\n else:\n return pob\n return None\n <mask token>\n <mask token>\n\n def getMainsent(self, realsbv, sentence):\n re = ''\n words, postags, rely_id, relation = self.getLTPAnalysis(sentence)\n if 0 not in rely_id:\n return None, None\n hed = rely_id.index(0)\n sbv = self.getWord(words, postags, rely_id, relation, hed, 'SBV')\n vob = self.getWord(words, postags, rely_id, relation, hed, 'VOB')\n fob = self.getWord(words, postags, rely_id, relation, hed, 'FOB')\n if sbv == None:\n reals = realsbv\n elif postags[sbv] == 'r' and realsbv != None:\n reals = realsbv\n else:\n reals = words[sbv]\n if reals == None:\n return None, None\n if sbv != None and postags[sbv] == 'n':\n temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)\n if temp != None:\n if words[sbv] not in self.data.keys():\n self.data[words[sbv]] = [words[temp]]\n else:\n 
self.data[words[sbv]].append(words[temp])\n if sbv != None:\n sbvcoo = self.getWord(words, postags, rely_id, relation, sbv, 'COO'\n )\n if sbvcoo != None:\n reals += words[sbvcoo]\n if postags[hed] == 'a':\n temp = self.getpron(words, postags, rely_id, relation, hed)\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[temp])\n elif sbv != None:\n temp = self.getatt_of_sbv(words, postags, rely_id, relation,\n sbv)\n if temp != None:\n re = '{} {} {}'.format(words[temp] + reals, words[hed], '')\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n finalvob = self.getFirstNotNone([vob, fob])\n if finalvob != None:\n temp = self.getWord(words, postags, rely_id, relation, finalvob,\n 'VOB')\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob] +\n words[temp])\n else:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob])\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n\n def getMain(self, sentence):\n sentence = re.sub(' ', '。', sentence)\n sentence = re.sub(',', '。', sentence)\n sents = SentenceSplitter.split(sentence)\n reals = None\n for s in sents:\n reals, res = self.getMainsent(reals, s)\n if res != None:\n print(res)\n\n def gettextmain(self):\n sents = self.SentS()\n for s in sents:\n self.getMain(s)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sentence(object):\n\n def __init__(self, text):\n self.text = text\n self.data = dict()\n\n def SentS(self):\n sents = SentenceSplitter.split(self.text)\n return sents\n\n def getLTPAnalysis(self, sentence):\n words = segmentor.segment(sentence)\n postags = postagger.postag(words)\n arcs = parser.parse(words, postags)\n rely_id = [arc.head for arc in arcs]\n relation = [arc.relation for arc in arcs]\n return words, postags, rely_id, relation\n\n def getWord(self, words, postags, rely_id, relation, _id, wType):\n sbv = None\n for i in range(len(words)):\n if relation[i] == wType and rely_id[i] == _id + 1:\n return i\n return sbv\n\n def getpron(self, words, postags, rely_id, relation, _id):\n flag = None\n for i in range(len(words)):\n if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i\n ] == _id + 1:\n flag = i\n break\n if flag == None:\n return None\n pob = None\n vob_of_pob = None\n pob = self.getWord(words, postags, rely_id, relation, flag, 'POB')\n if pob:\n vob_of_pob = self.getWord(words, postags, rely_id, relation,\n pob, 'VOB')\n if vob_of_pob:\n return vob_of_pob\n else:\n return pob\n return None\n\n def getatt_of_sbv(self, words, postags, rely_id, relation, _id):\n for i in range(len(words)):\n if relation[i] == 'ATT' and rely_id[i] == _id + 1 and (postags[\n i] == 'a' or postags[i] == 'n'):\n return i\n return None\n\n def getFirstNotNone(self, array):\n for word in array:\n if word is not None:\n return word\n return None\n\n def getMainsent(self, realsbv, sentence):\n re = ''\n words, postags, rely_id, relation = self.getLTPAnalysis(sentence)\n if 0 not in rely_id:\n return None, None\n hed = rely_id.index(0)\n sbv = self.getWord(words, postags, rely_id, relation, hed, 'SBV')\n vob = self.getWord(words, postags, rely_id, relation, hed, 'VOB')\n fob = self.getWord(words, postags, rely_id, relation, hed, 'FOB')\n if sbv == None:\n reals = realsbv\n elif postags[sbv] == 'r' and realsbv != None:\n reals = 
realsbv\n else:\n reals = words[sbv]\n if reals == None:\n return None, None\n if sbv != None and postags[sbv] == 'n':\n temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)\n if temp != None:\n if words[sbv] not in self.data.keys():\n self.data[words[sbv]] = [words[temp]]\n else:\n self.data[words[sbv]].append(words[temp])\n if sbv != None:\n sbvcoo = self.getWord(words, postags, rely_id, relation, sbv, 'COO'\n )\n if sbvcoo != None:\n reals += words[sbvcoo]\n if postags[hed] == 'a':\n temp = self.getpron(words, postags, rely_id, relation, hed)\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[temp])\n elif sbv != None:\n temp = self.getatt_of_sbv(words, postags, rely_id, relation,\n sbv)\n if temp != None:\n re = '{} {} {}'.format(words[temp] + reals, words[hed], '')\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n finalvob = self.getFirstNotNone([vob, fob])\n if finalvob != None:\n temp = self.getWord(words, postags, rely_id, relation, finalvob,\n 'VOB')\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob] +\n words[temp])\n else:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob])\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n\n def getMain(self, sentence):\n sentence = re.sub(' ', '。', sentence)\n sentence = re.sub(',', '。', sentence)\n sents = SentenceSplitter.split(sentence)\n reals = None\n for s in sents:\n reals, res = self.getMainsent(reals, s)\n if res != None:\n print(res)\n\n def gettextmain(self):\n sents = self.SentS()\n for s in sents:\n self.getMain(s)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Sentence(object):\n\n def __init__(self, text):\n self.text = text\n self.data = dict()\n\n def SentS(self):\n sents = SentenceSplitter.split(self.text)\n return sents\n\n def getLTPAnalysis(self, sentence):\n words = segmentor.segment(sentence)\n postags = postagger.postag(words)\n arcs = parser.parse(words, postags)\n rely_id = [arc.head for arc in arcs]\n relation = [arc.relation for arc in arcs]\n return words, postags, rely_id, relation\n\n def getWord(self, words, postags, rely_id, relation, _id, wType):\n sbv = None\n for i in range(len(words)):\n if relation[i] == wType and rely_id[i] == _id + 1:\n return i\n return sbv\n\n def getpron(self, words, postags, rely_id, relation, _id):\n flag = None\n for i in range(len(words)):\n if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i\n ] == _id + 1:\n flag = i\n break\n if flag == None:\n return None\n pob = None\n vob_of_pob = None\n pob = self.getWord(words, postags, rely_id, relation, flag, 'POB')\n if pob:\n vob_of_pob = self.getWord(words, postags, rely_id, relation,\n pob, 'VOB')\n if vob_of_pob:\n return vob_of_pob\n else:\n return pob\n return None\n\n def getatt_of_sbv(self, words, postags, rely_id, relation, _id):\n for i in range(len(words)):\n if relation[i] == 'ATT' and rely_id[i] == _id + 1 and (postags[\n i] == 'a' or postags[i] == 'n'):\n return i\n return None\n\n def getFirstNotNone(self, array):\n for word in array:\n if word is not None:\n return word\n return None\n\n def getMainsent(self, realsbv, sentence):\n re = ''\n words, postags, rely_id, relation = self.getLTPAnalysis(sentence)\n if 0 not in rely_id:\n return None, None\n hed = rely_id.index(0)\n sbv = self.getWord(words, postags, rely_id, relation, hed, 'SBV')\n vob = self.getWord(words, postags, rely_id, relation, hed, 'VOB')\n fob = self.getWord(words, postags, rely_id, relation, hed, 'FOB')\n if sbv == None:\n reals = realsbv\n elif postags[sbv] == 'r' and realsbv != None:\n reals = 
realsbv\n else:\n reals = words[sbv]\n if reals == None:\n return None, None\n if sbv != None and postags[sbv] == 'n':\n temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)\n if temp != None:\n if words[sbv] not in self.data.keys():\n self.data[words[sbv]] = [words[temp]]\n else:\n self.data[words[sbv]].append(words[temp])\n if sbv != None:\n sbvcoo = self.getWord(words, postags, rely_id, relation, sbv, 'COO'\n )\n if sbvcoo != None:\n reals += words[sbvcoo]\n if postags[hed] == 'a':\n temp = self.getpron(words, postags, rely_id, relation, hed)\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[temp])\n elif sbv != None:\n temp = self.getatt_of_sbv(words, postags, rely_id, relation,\n sbv)\n if temp != None:\n re = '{} {} {}'.format(words[temp] + reals, words[hed], '')\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n finalvob = self.getFirstNotNone([vob, fob])\n if finalvob != None:\n temp = self.getWord(words, postags, rely_id, relation, finalvob,\n 'VOB')\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob] +\n words[temp])\n else:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob])\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n\n def getMain(self, sentence):\n sentence = re.sub(' ', '。', sentence)\n sentence = re.sub(',', '。', sentence)\n sents = SentenceSplitter.split(sentence)\n reals = None\n for s in sents:\n reals, res = self.getMainsent(reals, s)\n if res != None:\n print(res)\n\n def gettextmain(self):\n sents = self.SentS()\n for s in sents:\n self.getMain(s)\n\n\n<mask token>\n\n\ndef readfile():\n fn = open('/users/emilywang/shihao yang/beef.txt')\n string_data = fn.read()\n fn.close()\n string_data = re.sub('\\\\[[0-9]*\\\\]', ' ', string_data)\n string_data = re.sub('\\\\s+', ' ', string_data)\n string_data = re.sub('-', '', string_data)\n return string_data\n\n\n<mask token>\n",
"step-4": "<mask token>\nos.getcwd()\nos.chdir('/Users/emilywang/shihao yang')\nos.getcwd()\nLTP_DATA_DIR = '/Users/emilywang/shihao yang/ltp_data_v3.4.0/'\ncws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')\nsegmentor = Segmentor()\nsegmentor.load(cws_model_path)\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')\npostagger = Postagger()\npostagger.load(pos_model_path)\npar_model_path = os.path.join(LTP_DATA_DIR, 'parser.model')\nparser = Parser()\nparser.load(par_model_path)\n\n\nclass Sentence(object):\n\n def __init__(self, text):\n self.text = text\n self.data = dict()\n\n def SentS(self):\n sents = SentenceSplitter.split(self.text)\n return sents\n\n def getLTPAnalysis(self, sentence):\n words = segmentor.segment(sentence)\n postags = postagger.postag(words)\n arcs = parser.parse(words, postags)\n rely_id = [arc.head for arc in arcs]\n relation = [arc.relation for arc in arcs]\n return words, postags, rely_id, relation\n\n def getWord(self, words, postags, rely_id, relation, _id, wType):\n sbv = None\n for i in range(len(words)):\n if relation[i] == wType and rely_id[i] == _id + 1:\n return i\n return sbv\n\n def getpron(self, words, postags, rely_id, relation, _id):\n flag = None\n for i in range(len(words)):\n if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i\n ] == _id + 1:\n flag = i\n break\n if flag == None:\n return None\n pob = None\n vob_of_pob = None\n pob = self.getWord(words, postags, rely_id, relation, flag, 'POB')\n if pob:\n vob_of_pob = self.getWord(words, postags, rely_id, relation,\n pob, 'VOB')\n if vob_of_pob:\n return vob_of_pob\n else:\n return pob\n return None\n\n def getatt_of_sbv(self, words, postags, rely_id, relation, _id):\n for i in range(len(words)):\n if relation[i] == 'ATT' and rely_id[i] == _id + 1 and (postags[\n i] == 'a' or postags[i] == 'n'):\n return i\n return None\n\n def getFirstNotNone(self, array):\n for word in array:\n if word is not None:\n return word\n return None\n\n def 
getMainsent(self, realsbv, sentence):\n re = ''\n words, postags, rely_id, relation = self.getLTPAnalysis(sentence)\n if 0 not in rely_id:\n return None, None\n hed = rely_id.index(0)\n sbv = self.getWord(words, postags, rely_id, relation, hed, 'SBV')\n vob = self.getWord(words, postags, rely_id, relation, hed, 'VOB')\n fob = self.getWord(words, postags, rely_id, relation, hed, 'FOB')\n if sbv == None:\n reals = realsbv\n elif postags[sbv] == 'r' and realsbv != None:\n reals = realsbv\n else:\n reals = words[sbv]\n if reals == None:\n return None, None\n if sbv != None and postags[sbv] == 'n':\n temp = self.getatt_of_sbv(words, postags, rely_id, relation, sbv)\n if temp != None:\n if words[sbv] not in self.data.keys():\n self.data[words[sbv]] = [words[temp]]\n else:\n self.data[words[sbv]].append(words[temp])\n if sbv != None:\n sbvcoo = self.getWord(words, postags, rely_id, relation, sbv, 'COO'\n )\n if sbvcoo != None:\n reals += words[sbvcoo]\n if postags[hed] == 'a':\n temp = self.getpron(words, postags, rely_id, relation, hed)\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[temp])\n elif sbv != None:\n temp = self.getatt_of_sbv(words, postags, rely_id, relation,\n sbv)\n if temp != None:\n re = '{} {} {}'.format(words[temp] + reals, words[hed], '')\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n finalvob = self.getFirstNotNone([vob, fob])\n if finalvob != None:\n temp = self.getWord(words, postags, rely_id, relation, finalvob,\n 'VOB')\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob] +\n words[temp])\n else:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob])\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals, re\n\n def getMain(self, sentence):\n sentence = re.sub(' ', '。', sentence)\n sentence = re.sub(',', '。', sentence)\n sents = SentenceSplitter.split(sentence)\n reals = None\n for s in sents:\n reals, res = self.getMainsent(reals, s)\n if res != 
None:\n print(res)\n\n def gettextmain(self):\n sents = self.SentS()\n for s in sents:\n self.getMain(s)\n\n\ns = Sentence('陈欣婕今天真好看。她今天中午吃炸鸡')\ns.gettextmain()\n\n\ndef readfile():\n fn = open('/users/emilywang/shihao yang/beef.txt')\n string_data = fn.read()\n fn.close()\n string_data = re.sub('\\\\[[0-9]*\\\\]', ' ', string_data)\n string_data = re.sub('\\\\s+', ' ', string_data)\n string_data = re.sub('-', '', string_data)\n return string_data\n\n\nstring = readfile()\ns = Sentence(readfile())\ns.gettextmain()\ns.data\nstring = readfile()\ns = Sentence('苹果和香蕉都是水果')\ns.gettextmain()\ns.data\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 2 13:34:19 2020\n\n@author: ShihaoYang\n\"\"\"\n\nfrom pyltp import SentenceSplitter\nfrom pyltp import Segmentor\nfrom pyltp import Postagger\nfrom pyltp import Parser\nfrom pyltp import NamedEntityRecognizer\nimport os\nimport jieba\nimport re\nos.getcwd()\nos.chdir('/Users/emilywang/shihao yang')\nos.getcwd()\n\nLTP_DATA_DIR='/Users/emilywang/shihao yang/ltp_data_v3.4.0/'\n\ncws_model_path=os.path.join(LTP_DATA_DIR,'cws.model')\nsegmentor=Segmentor()\nsegmentor.load(cws_model_path)\n\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model') # 词性标注模型路径,模型名称为`pos.model`\npostagger = Postagger() # 初始化实例\npostagger.load(pos_model_path) # 加载模型\n\npar_model_path = os.path.join(LTP_DATA_DIR, 'parser.model') # 依存句法分析模型路径,模型名称为`parser.model`\nparser = Parser() # 初始化实例\nparser.load(par_model_path) # 加载模型\n\nclass Sentence(object):\n\n def __init__(self,text):\n self.text = text\n self.data = dict()\n \n def SentS(self):\n sents = SentenceSplitter.split(self.text) # 分句\n return sents\n \n def getLTPAnalysis(self, sentence):\n\n words=segmentor.segment(sentence)\n #print('\\t'.join(words))\n\n postags = postagger.postag(words) # 词性标注\n #print('\\t'.join(postags))\n arcs = parser.parse(words, postags) # 句法分析\n rely_id = [arc.head for arc in arcs] # 提取依存父节点id\n relation = [arc.relation for arc in arcs] # 提取依存关系\n #heads = ['Root' if id == 0 else words[id-1] for id in rely_id] # 匹配依存父节点词语\n #for i in range(len(words)):\n #print(relation[i] + '(' + words[i] + ', ' + heads[i] + ')')\n \n return words,postags,rely_id,relation\n\n def getWord(self, words,postags,rely_id,relation,_id,wType):\n sbv = None\n for i in range(len(words)):\n if relation[i] == wType and rely_id[i] == (_id)+1:\n return i\n return sbv\n \n def getpron(self,words,postags,rely_id,relation,_id):\n flag = None\n for i in range(len(words)):\n if relation[i] == 'ADV' and postags[i] == 'p' and rely_id[i] == (_id)+1:\n flag = i\n 
break\n if flag == None:\n return None\n pob = None\n vob_of_pob = None\n pob = self.getWord(words,postags,rely_id,relation,flag,'POB')\n if pob:\n vob_of_pob = self.getWord(words,postags,rely_id,relation,pob,'VOB')\n if vob_of_pob:\n return vob_of_pob\n else:\n return pob\n return None\n \n def getatt_of_sbv(self,words,postags,rely_id,relation,_id):\n for i in range(len(words)):\n if relation[i] == 'ATT' and rely_id[i] == (_id)+1 and (postags[i]=='a' or postags[i]=='n'):\n return i\n return None\n def getFirstNotNone(self, array):\n for word in array:\n if word is not None:\n return word\n return None\n \n def getMainsent(self,realsbv,sentence):\n re = ''\n words,postags,rely_id,relation = self.getLTPAnalysis(sentence)\n #hed = self.getHED(array)\n if 0 not in rely_id:\n return None,None\n \n hed = rely_id.index(0)\n sbv = self.getWord(words,postags,rely_id,relation,hed, 'SBV') # 主语\n vob = self.getWord(words,postags,rely_id,relation,hed, 'VOB') # 宾语\n fob = self.getWord(words,postags,rely_id,relation,hed, 'FOB') # 后置宾语\n ###############\n if sbv == None:\n reals = realsbv\n elif postags[sbv] == 'r' and realsbv != None:\n reals = realsbv\n else:\n reals = words[sbv]\n \n if reals == None:\n return None,None\n if sbv != None and postags[sbv] == 'n':\n temp = self.getatt_of_sbv(words,postags,rely_id,relation,sbv)\n if temp != None:\n if words[sbv] not in self.data.keys():\n self.data[words[sbv]] = [words[temp]]\n else:\n self.data[words[sbv]].append(words[temp])\n if sbv != None:\n sbvcoo = self.getWord(words,postags,rely_id,relation,sbv, 'COO')\n if sbvcoo != None:\n reals += words[sbvcoo]\n ###############\n if postags[hed] == 'a':\n temp = self.getpron(words,postags,rely_id,relation,hed)\n if temp!= None:\n re = '{} {} {}'.format(reals, words[hed], words[temp])\n elif sbv != None:\n temp = self.getatt_of_sbv(words,postags,rely_id,relation,sbv)\n if temp != None:\n re = '{} {} {}'.format(words[temp] + reals, words[hed], '')\n else:\n re = '{} {} {}'.format(reals, 
words[hed], '')\n return reals,re\n \n finalvob = self.getFirstNotNone([vob, fob])\n if finalvob != None:\n temp = self.getWord(words,postags,rely_id,relation,finalvob, 'VOB')\n if temp != None:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob] + words[temp])\n else:\n re = '{} {} {}'.format(reals, words[hed], words[finalvob])\n else:\n re = '{} {} {}'.format(reals, words[hed], '')\n return reals,re\n def getMain(self,sentence):\n sentence = re.sub(' ','。',sentence)\n sentence = re.sub(',','。',sentence)\n sents = SentenceSplitter.split(sentence)\n reals = None\n for s in sents:\n reals,res = self.getMainsent(reals,s)\n if res != None:\n print(res)\n def gettextmain(self):\n sents = self.SentS()\n for s in sents:\n self.getMain(s)\n \ns = Sentence('陈欣婕今天真好看。她今天中午吃炸鸡')\ns.gettextmain()\n\ndef readfile():\n fn = open('/users/emilywang/shihao yang/beef.txt') # 打开文件\n string_data = fn.read() # 读出整个文件\n fn.close() # 关闭文件\n \n # Removing Square Brackets and Extra Spaces in Texts\n string_data = re.sub(r'\\[[0-9]*\\]', ' ', string_data)\n string_data = re.sub(r'\\s+', ' ', string_data)\n string_data = re.sub('-', '', string_data)\n\n return string_data\nstring = readfile()\ns = Sentence(readfile())\ns.gettextmain()\ns.data\n\nstring = readfile()\ns = Sentence('苹果和香蕉都是水果')\ns.gettextmain()\ns.data",
"step-ids": [
9,
11,
12,
14,
16
]
}
|
[
9,
11,
12,
14,
16
] |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Brandon Bennett <bennetb@gmail.com>
#
# Send a notification via notifyserver (https://github.com/nemith/notifyserver)
# on highlight/private message or new DCC.
#
# History:
#
# 2015-02-07, Brandon Bennett <bennetb@gmail.com>:
# version 0.1: initial release
#
SCRIPT_NAME = 'notifyserver'
SCRIPT_AUTHOR = 'Brandon Bennett <bennetb@gmail.com>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'MIT'
SCRIPT_DESC = 'Send a notification to a notifyserver on highlight/private message or new DCC'
# Tracks whether every required module imported cleanly; checked in the
# __main__ guard before registering with WeeChat.
import_ok = True
try:
    import weechat
except:
    print('This script must be run under WeeChat.')
    print('Get WeeChat now at: http://www.weechat.org/')
    import_ok = False
try:
    # NOTE(review): urllib2 is Python 2 only; this script presumably targets
    # a WeeChat built against Python 2 — confirm before porting.
    import json, urllib2
except ImportError as message:
    print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))
    import_ok = False
# Global Config instance; populated in the __main__ guard after register().
cfg = None
class Config(object):
    """Plugin options with defaults, backed by WeeChat's plugin config.

    On construction, any option the user has not set is seeded with its
    default value; `update()` refreshes the local cache from WeeChat.
    """

    _DEFAULT = {
        'url': 'http://localhost:9999/notify',
        'title': 'IRC Notification',
        'activate_label': '',
        'sound': '',
    }

    def __init__(self):
        self._opts = {}
        # Seed WeeChat with a default for every option not yet set.
        for name in self._DEFAULT:
            if not weechat.config_is_set_plugin(name):
                weechat.config_set_plugin(name, self._DEFAULT[name])
        self.update()

    def update(self):
        """Re-read every known option from WeeChat into the local cache."""
        self._opts = {name: weechat.config_get_plugin(name)
                      for name in self._DEFAULT}

    def __getitem__(self, key):
        return self._opts[key]
def config_cb(data, option, value):
    """WeeChat config-change callback: refresh the cached options."""
    cfg.update()
    return weechat.WEECHAT_RC_OK
def send_notify(**kwargs):
    """POST *kwargs* as a JSON body to the configured notifyserver URL.

    Propagates urllib2.URLError/HTTPError if the server is unreachable
    or returns an error status.
    """
    payload = json.dumps(kwargs)
    req = urllib2.Request(cfg['url'], payload,
                          {'Content-Type': 'application/json'})
    f = urllib2.urlopen(req)
    try:
        # Drain the response so the request completes cleanly; the body
        # itself is not used (the original bound it to an unused local).
        f.read()
    finally:
        # Close even if read() raises, so the connection is not leaked.
        f.close()
def notify(subtitle, message):
    """Deliver one notification, honoring optional activate/sound settings."""
    extras = {}
    # Only forward optional settings the user actually configured.
    for key, opt_name in (('activate', 'activate_label'), ('sound', 'sound')):
        value = cfg[opt_name]
        if value:
            extras[key] = value
    send_notify(title=cfg['title'], subtitle=subtitle,
                message=message, **extras)
def handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message):
    """WeeChat print hook: notify on private messages and channel highlights.

    The signature is dictated by weechat.hook_print(); returns
    WEECHAT_RC_OK unconditionally so printing is never blocked.
    """
    # NOTE(review): some WeeChat versions pass highlight as the string
    # "0"/"1", and bool("0") is True -- confirm against the target version;
    # int(highlight) would be the robust conversion.
    highlight = bool(highlight)
    buffer_type = weechat.buffer_get_string(pbuffer, "localvar_type")
    buffer_name = weechat.buffer_get_string(pbuffer, "short_name")
    # Dropped the original's unused 'away' lookup (localvar_away was read
    # but never consulted).
    if buffer_type == 'private':
        notify("Private message from {}".format(buffer_name), message)
    elif buffer_type == 'channel' and highlight:
        notify("Highlight {}@{}".format(prefix, buffer_name), message)
    return weechat.WEECHAT_RC_OK
if __name__ == '__main__' and import_ok:
    # register() returns a truthy value only when registration succeeds.
    if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
        SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
        cfg = Config()
        # Re-read options whenever plugins.var.python.notifyserver.* changes.
        weechat.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_cb", "")
        # Inspect every printed message; handle_msg decides whether to notify.
        weechat.hook_print("", "", "", 1, "handle_msg", "")
|
normal
|
{
"blob_id": "0ae9ad7af26e3d19f2d3967c02611503c32aea70",
"index": 2593,
"step-1": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)\n\n\ndef handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message\n ):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')\n buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')\n away = weechat.buffer_get_string(pbuffer, 'localvar_away')\n if buffer_type == 'private':\n notify('Private message from {}'.format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify('Highlight {}@{}'.format(prefix, buffer_name), message)\n return weechat.WEECHAT_RC_OK\n\n\n<mask token>\n",
"step-4": "<mask token>\ntry:\n import weechat\nexcept:\n print('This script must be run under WeeChat.')\n print('Get WeeChat now at: http://www.weechat.org/')\n import_ok = False\ntry:\n import json, urllib2\nexcept ImportError as message:\n print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))\n import_ok = False\n<mask token>\n\n\nclass Config(object):\n _DEFAULT = {'url': 'http://localhost:9999/notify', 'title':\n 'IRC Notification', 'activate_label': '', 'sound': ''}\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value)\n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type':\n 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n send_notify(title=cfg['title'], subtitle=subtitle, message=message, **opt)\n\n\ndef handle_msg(data, pbuffer, date, tags, displayed, highlight, prefix, message\n ):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, 'localvar_type')\n buffer_name = weechat.buffer_get_string(pbuffer, 'short_name')\n away = weechat.buffer_get_string(pbuffer, 'localvar_away')\n if buffer_type == 'private':\n notify('Private message from {}'.format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify('Highlight {}@{}'.format(prefix, buffer_name), message)\n return weechat.WEECHAT_RC_OK\n\n\nif __name__ == '__main__' and import_ok:\n if 
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n SCRIPT_LICENSE, SCRIPT_DESC, '', ''):\n cfg = Config()\n weechat.hook_config('plugins.var.python.' + SCRIPT_NAME + '.*',\n 'config_cb', '')\n weechat.hook_print('', '', '', 1, 'handle_msg', '')\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 Brandon Bennett <bennetb@gmail.com>\n#\n# Send a notification via notifyserver (https://github.com/nemith/notifyserver) \n# on highlight/private message or new DCC.\n#\n# History:\n#\n# 2015-02-07, Brandon Bennett <bennetb@gmail.com>:\n# version 0.1: initial release\n#\n\nSCRIPT_NAME = 'notifyserver'\nSCRIPT_AUTHOR = 'Brandon Bennett <bennetb@gmail.com>'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'MIT'\nSCRIPT_DESC = 'Send a notification to a notifyserver on highlight/private message or new DCC'\n\nimport_ok = True\n\ntry:\n import weechat\nexcept:\n print('This script must be run under WeeChat.')\n print('Get WeeChat now at: http://www.weechat.org/')\n import_ok = False\n\ntry:\n import json, urllib2\nexcept ImportError as message:\n print('Missing package(s) for %s: %s' % (SCRIPT_NAME, message))\n import_ok = False\n\n\ncfg = None\n\n\nclass Config(object):\n _DEFAULT = {\n 'url' : 'http://localhost:9999/notify',\n 'title': 'IRC Notification',\n 'activate_label': \"\",\n 'sound': \"\",\n }\n\n def __init__(self):\n self._opts = {}\n for opt, value in self._DEFAULT.items():\n if not weechat.config_is_set_plugin(opt):\n weechat.config_set_plugin(opt, value) \n self.update()\n\n def update(self):\n for opt in self._DEFAULT.keys():\n self._opts[opt] = weechat.config_get_plugin(opt)\n\n def __getitem__(self, key):\n return self._opts[key]\n\ndef config_cb(data, option, value):\n cfg.update()\n return weechat.WEECHAT_RC_OK\n\ndef send_notify(**kwargs):\n data = json.dumps(kwargs)\n req = urllib2.Request(cfg['url'], data, {'Content-Type': 'application/json'})\n f = urllib2.urlopen(req)\n response = f.read()\n f.close()\n\ndef notify(subtitle, message):\n opt = {}\n if cfg['activate_label']:\n opt['activate'] = cfg['activate_label']\n if cfg['sound']:\n opt['sound'] = cfg['sound']\n\n send_notify(\n title=cfg['title'],\n subtitle=subtitle,\n message=message,\n **opt)\n\ndef handle_msg(data, pbuffer, date, tags, 
displayed, highlight, prefix, message):\n highlight = bool(highlight)\n buffer_type = weechat.buffer_get_string(pbuffer, \"localvar_type\")\n buffer_name = weechat.buffer_get_string(pbuffer, \"short_name\")\n away = weechat.buffer_get_string(pbuffer, \"localvar_away\")\n\n if buffer_type == 'private':\n notify(\"Private message from {}\".format(buffer_name), message)\n elif buffer_type == 'channel' and highlight:\n notify(\"Highlight {}@{}\".format(prefix, buffer_name), message)\n\n return weechat.WEECHAT_RC_OK\n\nif __name__ == '__main__' and import_ok:\n if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,\n SCRIPT_LICENSE, SCRIPT_DESC, '', ''):\n cfg = Config()\n \n weechat.hook_config(\"plugins.var.python.\" + SCRIPT_NAME + \".*\", \"config_cb\", \"\")\n weechat.hook_print(\"\", \"\", \"\", 1, \"handle_msg\", \"\")\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(str01[3])
print(str01[1])
<|reserved_special_token_0|>
print(l01)
<|reserved_special_token_0|>
while i <= index_last:
print(str01[i])
i += 1
print()
print('上面的循环结束了 执行到这里')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Demonstration script: string indexing, len(), and index-based traversal.
str01 = '大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'
# Each character has a positional index; indexing returns that character.
a = str01[3]
print(str01[3])
print(str01[1])
# len() returns the number of characters in the string.
l01 = len(str01)
print(l01)
str01 = '大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'
len01 = len(str01)
# The last valid index is length - 1.
index_last = len01 - 1
i = 0  # current character index
# Walk the string one index at a time, printing each character.
while i <= index_last:
    print(str01[i])
    i += 1
print()
print('上面的循环结束了 执行到这里')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Iterate over (loop through) every character in a string.
str01 = "大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&"
# -----> every character in a string has an index; indexing with that
# index returns the corresponding character.
# '而' is at index 3
a = str01[3]
print(str01[3])
# '发' is at index 1
print(str01[1])
# ----> compute the length of the string
# this string contains 35 characters, so its length is 35
l01 = len(str01)
print(l01)
str01 = "大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&"
# index of the last character: string length - 1
len01 = len(str01)  # length of the string
index_last = len01 - 1  # index of the last character
i = 0  # i is the current character index
while i <= index_last:
    print(str01[i])
    i += 1
print()
print("上面的循环结束了 执行到这里")
'''
 0 1 2 ..... 34
'''
|
flexible
|
{
"blob_id": "7262d7a82834b38762616a30d4eac38078e4b616",
"index": 6724,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(str01[3])\nprint(str01[1])\n<mask token>\nprint(l01)\n<mask token>\nwhile i <= index_last:\n print(str01[i])\n i += 1\nprint()\nprint('上面的循环结束了 执行到这里')\n<mask token>\n",
"step-3": "str01 = '大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'\na = str01[3]\nprint(str01[3])\nprint(str01[1])\nl01 = len(str01)\nprint(l01)\nstr01 = '大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&'\nlen01 = len(str01)\nindex_last = len01 - 1\ni = 0\nwhile i <= index_last:\n print(str01[i])\n i += 1\nprint()\nprint('上面的循环结束了 执行到这里')\n<mask token>\n",
"step-4": "# 遍历(循环) 出字符串中的每一个元素\r\nstr01 = \"大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&\"\r\n\r\n\r\n# ----->字符串中的元素都是有索引的,根据索引可以得到对应的元素\r\n# 而---3\r\na = str01[3]\r\nprint(str01[3])\r\n\r\n# 发---1\r\nprint(str01[1])\r\n\r\n#---->计算字符串的长度\r\n# 这个字符串中 有 35个元素 ,长度是35\r\nl01 = len(str01)\r\nprint(l01)\r\n\r\n\r\n\r\nstr01 = \"大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&\"\r\n# 最后一个元素的索引:字符串的长度-1\r\n\r\nlen01 = len(str01)# 字符串的长度\r\nindex_last = len01 - 1 # 最后一个元素的索引\r\ni = 0 # i变量表示是 元素的索引\r\n\r\nwhile i <= index_last:\r\n print(str01[i])\r\n i += 1\r\n\r\nprint()\r\nprint(\"上面的循环结束了 执行到这里\")\r\n\r\n\r\n'''\r\n 0 1 2 ..... 34\r\n \r\n\r\n'''\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from django.shortcuts import render, redirect
from app.models import PaidTimeOff, Schedule
from django.utils import timezone
from django.contrib import messages
from app.decorators import user_is_authenticated
from app.views import utils
@require_http_methods(["GET", "POST"])
@user_is_authenticated
def index(request, user_id):
    """Paid-time-off page entry point: GET renders it, POST updates it.

    Resolves the current user and their PaidTimeOff record, then hands
    off to index_get/index_post based on the HTTP method.
    """
    user = utils.current_user(request)
    if not user:
        return HttpResponse("User " + str(user_id) + " NOT FOUND")
    pto = PaidTimeOff.objects.filter(user=user).first()
    if not pto:
        return HttpResponse("PTO " + str(user_id) + " NOT FOUND")
    # Dispatch table keyed on the HTTP method; require_http_methods already
    # limits us to GET/POST, the fallback is defensive.
    handlers = {"GET": index_get, "POST": index_post}
    handler = handlers.get(request.method)
    if handler is None:
        return HttpResponse("Invalid HTTP method")
    return handler(request, user_id, user, pto)
def index_get(request, user_id, user, pto):  # pylint: disable=unused-argument
    """Render the paid-time-off page with the user's scheduled events.

    The signature mirrors index_post() so index() can dispatch uniformly;
    user_id is unused here.
    """
    schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))
    # Copy the instance dict: the original used pto.__dict__ directly, so
    # update() injected 'schedules'/'current_user' attributes onto the
    # model instance itself.
    context = dict(pto.__dict__)
    context.update({"schedules": schedules, "current_user": user})
    return render(request, "users/paid_time_off.html",
                  context=context)
def index_post(request, user_id, user, pto):
    """Validate the PTO form, create a Schedule on success, and redirect.

    Validation errors and creation failures are surfaced to the user via
    the Django messages framework; either way the client is redirected
    back to the paid-time-off page.
    """
    form = request.POST
    if not form:
        return HttpResponse("No form found")
    err_msg = PaidTimeOff.validate_PTO_form(form)
    if err_msg:
        messages.add_message(request, messages.INFO, err_msg)
    else:
        try:
            begin = Schedule.reformat(form['date_begin'])
            end = Schedule.reformat(form['date_end'])
            Schedule.objects.create(
                user=user,
                pto=pto,
                date_begin=begin,
                date_end=end,
                event_name=form['event_name'],
                event_type='PTO',
                event_desc=form['event_description'],
                created_at=timezone.now(),
                updated_at=timezone.now(),
            )
            messages.add_message(request, messages.INFO,
                                 "Information successfully updated")
        except Exception as e:
            # Boundary handler: report the failure to the user rather
            # than returning a 500.
            messages.add_message(request, messages.INFO, str(e))
    return redirect("/users/%s/paid_time_off/" % user_id, permanent=False)
|
normal
|
{
"blob_id": "7245d4db6440d38b9302907a6203c1507c373112",
"index": 6970,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % user_id\n return redirect(url, permanent=False)\n",
"step-3": "<mask token>\n\n\n@require_http_methods(['GET', 'POST'])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse('User ' + str(user_id) + ' NOT FOUND')\n pto = PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse('PTO ' + str(user_id) + ' NOT FOUND')\n if request.method == 'GET':\n return index_get(request, user_id, user, pto)\n elif request.method == 'POST':\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse('Invalid HTTP method')\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % user_id\n return redirect(url, permanent=False)\n",
"step-4": "from django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render, redirect\nfrom app.models import PaidTimeOff, Schedule\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom app.decorators import user_is_authenticated\nfrom app.views import utils\n\n\n@require_http_methods(['GET', 'POST'])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse('User ' + str(user_id) + ' NOT FOUND')\n pto = PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse('PTO ' + str(user_id) + ' NOT FOUND')\n if request.method == 'GET':\n return index_get(request, user_id, user, pto)\n elif request.method == 'POST':\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse('Invalid HTTP method')\n\n\ndef index_get(request, user_id, user, pto):\n schedules = Schedule.to_calendar(Schedule.objects.filter(pto=pto))\n context = pto.__dict__\n context.update({'schedules': schedules, 'current_user': user})\n return render(request, 'users/paid_time_off.html', context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse('No form found')\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(user=user, pto=pto, date_begin=\n date_begin, date_end=date_end, event_name=form['event_name'\n ], event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n 'Information successfully updated')\n except Exception as e:\n messages.add_message(request, messages.INFO, str(e))\n url = '/users/%s/paid_time_off/' % 
user_id\n return redirect(url, permanent=False)\n",
"step-5": "\n\nfrom django.http import HttpResponse\nfrom django.views.decorators.http import require_http_methods\nfrom django.shortcuts import render, redirect\nfrom app.models import PaidTimeOff, Schedule\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom app.decorators import user_is_authenticated\nfrom app.views import utils\n\n\n@require_http_methods([\"GET\", \"POST\"])\n@user_is_authenticated\ndef index(request, user_id):\n user = utils.current_user(request)\n if not user:\n return HttpResponse(\"User \" + str(user_id) + \" NOT FOUND\")\n pto = PaidTimeOff.objects.filter(user=user).first()\n if not pto:\n return HttpResponse(\"PTO \" + str(user_id) + \" NOT FOUND\")\n if request.method == \"GET\":\n return index_get(request, user_id, user, pto)\n elif request.method == \"POST\":\n return index_post(request, user_id, user, pto)\n else:\n return HttpResponse(\"Invalid HTTP method\")\n\n\ndef index_get(request, user_id, user, pto): # pylint: disable=unused-argument\n schedules = Schedule.to_calendar((Schedule.objects.filter(pto=pto)))\n context = pto.__dict__\n context.update({\"schedules\": schedules, \"current_user\": user})\n return render(request, \"users/paid_time_off.html\",\n context=context)\n\n\ndef index_post(request, user_id, user, pto):\n form = request.POST\n if not form:\n return HttpResponse(\"No form found\")\n err_msg = PaidTimeOff.validate_PTO_form(form)\n if len(err_msg) > 0:\n messages.add_message(request, messages.INFO, err_msg)\n else:\n try:\n date_begin = Schedule.reformat(form['date_begin'])\n date_end = Schedule.reformat(form['date_end'])\n Schedule.objects.create(\n user=user, pto=pto, date_begin=date_begin,\n date_end=date_end, event_name=form['event_name'],\n event_type='PTO', event_desc=form['event_description'],\n created_at=timezone.now(), updated_at=timezone.now())\n messages.add_message(request, messages.INFO,\n \"Information successfully updated\")\n except Exception as e:\n 
messages.add_message(request, messages.INFO, str(e))\n url = \"/users/%s/paid_time_off/\" % user_id\n return redirect(url, permanent=False)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#OpenCV create samples commands
#opencv_createsamples -img watch5050.jpg -bg bg.txt -info info/info.lst -pngoutput info -maxxangle 0.5 -maxyangle 0.5 -maxzangle 0.5 -num 1950
#opencv_createsamples -info info/info.lst -num 1950 -w 20 -h 20 -vec positives.vec
#Training command
#opencv_traincascade -data data -vec positives.vec -bg bg.txt -numPos 1800 -numNeg 900 -numStages 10 -w 20 -h 20
|
normal
|
{
"blob_id": "62e0c3b6095a65a4508eddfa9c0a1cb31d6c917b",
"index": 8887,
"step-1": "#OpenCV create samples commands\r\n#opencv_createsamples -img watch5050.jpg -bg bg.txt -info info/info.lst -pngoutput info -maxxangle 0.5 -maxyangle 0.5 -maxzangle 0.5 -num 1950\r\n#opencv_createsamples -info info/info.lst -num 1950 -w 20 -h 20 -vec positives.vec\r\n\r\n#Training command\r\n#opencv_traincascade -data data -vec positives.vec -bg bg.txt -numPos 1800 -numNeg 900 -numStages 10 -w 20 -h 20\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
from kivy.uix.progressbar import ProgressBar
from kivy.animation import Animation
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.graphics import Color, Rectangle
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.gridlayout import GridLayout
from kivy.core.window import Window
from kivy.uix.dropdown import DropDown
Window.clearcolor = (1, 1, 1, 1)
class _BoxLayout(BoxLayout):
    """BoxLayout with a solid light-green background rectangle.

    The rectangle is drawn on canvas.before and kept aligned with the
    widget's bounds through size/pos bindings.
    """

    def __init__(self, **kwargs):
        super(_BoxLayout, self).__init__(**kwargs)
        with self.canvas.before:
            Color(0.878, 0.941, 0.784)
            self.rect = Rectangle(pos=self.pos, size=self.size)
        # Redraw the background whenever the widget moves or resizes.
        self.bind(pos=self._update_rect, size=self._update_rect)

    def _update_rect(self, widget, _value):
        """Keep the background rectangle glued to the widget's bounds."""
        self.rect.pos = widget.pos
        self.rect.size = widget.size
class KaliteUI(object):
    """Builds and manages the KA Lite launcher window layout.

    Owns the root GridLayout plus sub-areas for the logo/menu bar, the
    loading image, status messages, and a progress bar.
    """

    def __init__(self, kaliteApp):
        dropdown = DropDown()
        dropdown_btn = Button(
            text='menu',
            size_hint_x=None,
            size_hint_y=None,
            size=(150, 40),
            font_size=18,
            color=(.06, .6, .2, 1),
            bold=True,
            background_color=(1, 1, 1, 0.2),
        )
        dropdown_btn.bind(on_release=dropdown.open)

        self.root_layout = GridLayout(cols=1)

        # Top bar: logo on the left, a stretchy spacer, then the menu button.
        logo_holder = _BoxLayout(orientation='horizontal')
        logo_img = Image(source='horizontal-logo.png', size_hint_x=None, width=360)
        logo_holder.padding = [10, 10, 10, 10]
        logo_holder.add_widget(logo_img)

        self.content_reload_btn = Button(
            text='Reload Content',
            size_hint_x=None,
            size_hint_y=None,
            size=(150, 40),
            font_size=18,
            color=(1, 1, 1, 1),
            bold=True,
        )
        self.content_reload_btn.bind(on_press=kaliteApp.reload_content)

        spacer = _BoxLayout(orientation='horizontal', pos_hint={'x': .8})
        logo_holder.add_widget(spacer)
        buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')
        # The reload button lives inside the dropdown, opened by the menu button.
        dropdown.add_widget(self.content_reload_btn)
        logo_holder.add_widget(dropdown_btn)
        logo_holder.spacing = [300, 0]
        self.root_layout.add_widget(logo_holder)

        # Area that hosts the animated loading image while content loads.
        self.img_holder = BoxLayout(orientation='vertical', size=(200, 200), size_hint=(1, None))
        self.img_holder.padding = [0, 80, 0, 10]
        self.root_layout.add_widget(self.img_holder)

        self.progress_bar = ProgressBar()
        self.messages = BoxLayout(orientation='vertical')
        self.root_layout.add_widget(self.messages)
        self.root_layout.add_widget(buttons_holder)
        self.root_layout.add_widget(self.progress_bar)

    def disable_reload_bnt(self):
        """Grey out the 'Reload Content' menu entry."""
        self.content_reload_btn.disabled = True

    def get_root_Layout(self):
        """Return the root layout to be used as the app's root widget."""
        return self.root_layout

    def add_messages(self, message):
        """Show *message* (a widget) in the status area."""
        self.messages.add_widget(message)

    def remove_messages(self, message):
        """Remove a previously shown status widget."""
        self.messages.remove_widget(message)

    def add_loading_gif(self):
        """Display the animated loading image."""
        self.gif_img = Image(source='loading.zip', anim_delay=0.15)
        self.img_holder.add_widget(self.gif_img)

    def remove_loading_gif(self):
        """Hide the image previously added by add_loading_gif()."""
        self.img_holder.remove_widget(self.gif_img)

    def start_progress_bar(self, anim_value):
        """Animate the progress bar toward *anim_value* over 3 seconds."""
        self.anim = Animation(value=anim_value, duration=3)
        self.anim.start(self.progress_bar)

    def animation_bind(self, bindFunction):
        """Invoke *bindFunction* when the progress animation completes."""
        self.anim.bind(on_complete=bindFunction)
|
normal
|
{
"blob_id": "35cd1c45294b826784eab9885ec5b0132624c957",
"index": 4028,
"step-1": "<mask token>\n\n\nclass KaliteUI(object):\n\n def __init__(self, kaliteApp):\n dropdown = DropDown()\n dropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=\n None, size=(150, 40), font_size=18, color=(0.06, 0.6, 0.2, 1),\n bold=True, background_color=(1, 1, 1, 0.2))\n dropdown_btn.bind(on_release=dropdown.open)\n self.root_layout = GridLayout(cols=1)\n logo_holder = _BoxLayout(orientation='horizontal')\n logo_img = Image(source='horizontal-logo.png', size_hint_x=None,\n width=360)\n logo_holder.padding = [10, 10, 10, 10]\n logo_holder.add_widget(logo_img)\n self.content_reload_btn = Button(text='Reload Content', size_hint_x\n =None, size_hint_y=None, size=(150, 40), font_size=18, color=(1,\n 1, 1, 1), bold=True)\n self.content_reload_btn.bind(on_press=kaliteApp.reload_content)\n space_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': 0.8}\n )\n logo_holder.add_widget(space_holder)\n buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')\n dropdown.add_widget(self.content_reload_btn)\n logo_holder.add_widget(dropdown_btn)\n logo_holder.spacing = [300, 0]\n self.root_layout.add_widget(logo_holder)\n self.img_holder = BoxLayout(orientation='vertical', size=(200, 200),\n size_hint=(1, None))\n self.img_holder.padding = [0, 80, 0, 10]\n self.root_layout.add_widget(self.img_holder)\n self.progress_bar = ProgressBar()\n self.messages = BoxLayout(orientation='vertical')\n self.root_layout.add_widget(self.messages)\n self.root_layout.add_widget(buttons_holder)\n self.root_layout.add_widget(self.progress_bar)\n\n def disable_reload_bnt(self):\n self.content_reload_btn.disabled = True\n\n def get_root_Layout(self):\n return self.root_layout\n\n def add_messages(self, message):\n self.messages.add_widget(message)\n\n def remove_messages(self, message):\n self.messages.remove_widget(message)\n\n def add_loading_gif(self):\n self.gif_img = Image(source='loading.zip', anim_delay=0.15)\n self.img_holder.add_widget(self.gif_img)\n\n 
def remove_loading_gif(self):\n self.img_holder.remove_widget(self.gif_img)\n\n def start_progress_bar(self, anim_value):\n self.anim = Animation(value=anim_value, duration=3)\n self.anim.start(self.progress_bar)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _BoxLayout(BoxLayout):\n <mask token>\n <mask token>\n\n\nclass KaliteUI(object):\n\n def __init__(self, kaliteApp):\n dropdown = DropDown()\n dropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=\n None, size=(150, 40), font_size=18, color=(0.06, 0.6, 0.2, 1),\n bold=True, background_color=(1, 1, 1, 0.2))\n dropdown_btn.bind(on_release=dropdown.open)\n self.root_layout = GridLayout(cols=1)\n logo_holder = _BoxLayout(orientation='horizontal')\n logo_img = Image(source='horizontal-logo.png', size_hint_x=None,\n width=360)\n logo_holder.padding = [10, 10, 10, 10]\n logo_holder.add_widget(logo_img)\n self.content_reload_btn = Button(text='Reload Content', size_hint_x\n =None, size_hint_y=None, size=(150, 40), font_size=18, color=(1,\n 1, 1, 1), bold=True)\n self.content_reload_btn.bind(on_press=kaliteApp.reload_content)\n space_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': 0.8}\n )\n logo_holder.add_widget(space_holder)\n buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')\n dropdown.add_widget(self.content_reload_btn)\n logo_holder.add_widget(dropdown_btn)\n logo_holder.spacing = [300, 0]\n self.root_layout.add_widget(logo_holder)\n self.img_holder = BoxLayout(orientation='vertical', size=(200, 200),\n size_hint=(1, None))\n self.img_holder.padding = [0, 80, 0, 10]\n self.root_layout.add_widget(self.img_holder)\n self.progress_bar = ProgressBar()\n self.messages = BoxLayout(orientation='vertical')\n self.root_layout.add_widget(self.messages)\n self.root_layout.add_widget(buttons_holder)\n self.root_layout.add_widget(self.progress_bar)\n\n def disable_reload_bnt(self):\n self.content_reload_btn.disabled = True\n\n def get_root_Layout(self):\n return self.root_layout\n\n def add_messages(self, message):\n self.messages.add_widget(message)\n\n def remove_messages(self, message):\n self.messages.remove_widget(message)\n\n def add_loading_gif(self):\n self.gif_img = Image(source='loading.zip', 
anim_delay=0.15)\n self.img_holder.add_widget(self.gif_img)\n\n def remove_loading_gif(self):\n self.img_holder.remove_widget(self.gif_img)\n\n def start_progress_bar(self, anim_value):\n self.anim = Animation(value=anim_value, duration=3)\n self.anim.start(self.progress_bar)\n\n def animation_bind(self, bindFunction):\n self.anim.bind(on_complete=bindFunction)\n",
"step-3": "<mask token>\nWindow.clearcolor = 1, 1, 1, 1\n\n\nclass _BoxLayout(BoxLayout):\n\n def __init__(self, **kwargs):\n super(_BoxLayout, self).__init__(**kwargs)\n with self.canvas.before:\n Color(0.878, 0.941, 0.784)\n self.rect = Rectangle(size=self.size, pos=self.pos)\n self.bind(size=self._update_rect, pos=self._update_rect)\n\n def _update_rect(self, instance, value):\n self.rect.pos = instance.pos\n self.rect.size = instance.size\n\n\nclass KaliteUI(object):\n\n def __init__(self, kaliteApp):\n dropdown = DropDown()\n dropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=\n None, size=(150, 40), font_size=18, color=(0.06, 0.6, 0.2, 1),\n bold=True, background_color=(1, 1, 1, 0.2))\n dropdown_btn.bind(on_release=dropdown.open)\n self.root_layout = GridLayout(cols=1)\n logo_holder = _BoxLayout(orientation='horizontal')\n logo_img = Image(source='horizontal-logo.png', size_hint_x=None,\n width=360)\n logo_holder.padding = [10, 10, 10, 10]\n logo_holder.add_widget(logo_img)\n self.content_reload_btn = Button(text='Reload Content', size_hint_x\n =None, size_hint_y=None, size=(150, 40), font_size=18, color=(1,\n 1, 1, 1), bold=True)\n self.content_reload_btn.bind(on_press=kaliteApp.reload_content)\n space_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': 0.8}\n )\n logo_holder.add_widget(space_holder)\n buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')\n dropdown.add_widget(self.content_reload_btn)\n logo_holder.add_widget(dropdown_btn)\n logo_holder.spacing = [300, 0]\n self.root_layout.add_widget(logo_holder)\n self.img_holder = BoxLayout(orientation='vertical', size=(200, 200),\n size_hint=(1, None))\n self.img_holder.padding = [0, 80, 0, 10]\n self.root_layout.add_widget(self.img_holder)\n self.progress_bar = ProgressBar()\n self.messages = BoxLayout(orientation='vertical')\n self.root_layout.add_widget(self.messages)\n self.root_layout.add_widget(buttons_holder)\n self.root_layout.add_widget(self.progress_bar)\n\n 
def disable_reload_bnt(self):\n self.content_reload_btn.disabled = True\n\n def get_root_Layout(self):\n return self.root_layout\n\n def add_messages(self, message):\n self.messages.add_widget(message)\n\n def remove_messages(self, message):\n self.messages.remove_widget(message)\n\n def add_loading_gif(self):\n self.gif_img = Image(source='loading.zip', anim_delay=0.15)\n self.img_holder.add_widget(self.gif_img)\n\n def remove_loading_gif(self):\n self.img_holder.remove_widget(self.gif_img)\n\n def start_progress_bar(self, anim_value):\n self.anim = Animation(value=anim_value, duration=3)\n self.anim.start(self.progress_bar)\n\n def animation_bind(self, bindFunction):\n self.anim.bind(on_complete=bindFunction)\n",
"step-4": "from kivy.uix.progressbar import ProgressBar\nfrom kivy.animation import Animation\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.image import Image\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.core.window import Window\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.core.window import Window\nfrom kivy.uix.dropdown import DropDown\nWindow.clearcolor = 1, 1, 1, 1\n\n\nclass _BoxLayout(BoxLayout):\n\n def __init__(self, **kwargs):\n super(_BoxLayout, self).__init__(**kwargs)\n with self.canvas.before:\n Color(0.878, 0.941, 0.784)\n self.rect = Rectangle(size=self.size, pos=self.pos)\n self.bind(size=self._update_rect, pos=self._update_rect)\n\n def _update_rect(self, instance, value):\n self.rect.pos = instance.pos\n self.rect.size = instance.size\n\n\nclass KaliteUI(object):\n\n def __init__(self, kaliteApp):\n dropdown = DropDown()\n dropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=\n None, size=(150, 40), font_size=18, color=(0.06, 0.6, 0.2, 1),\n bold=True, background_color=(1, 1, 1, 0.2))\n dropdown_btn.bind(on_release=dropdown.open)\n self.root_layout = GridLayout(cols=1)\n logo_holder = _BoxLayout(orientation='horizontal')\n logo_img = Image(source='horizontal-logo.png', size_hint_x=None,\n width=360)\n logo_holder.padding = [10, 10, 10, 10]\n logo_holder.add_widget(logo_img)\n self.content_reload_btn = Button(text='Reload Content', size_hint_x\n =None, size_hint_y=None, size=(150, 40), font_size=18, color=(1,\n 1, 1, 1), bold=True)\n self.content_reload_btn.bind(on_press=kaliteApp.reload_content)\n space_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': 0.8}\n )\n logo_holder.add_widget(space_holder)\n buttons_holder = AnchorLayout(anchor_x='center', anchor_y='center')\n dropdown.add_widget(self.content_reload_btn)\n logo_holder.add_widget(dropdown_btn)\n 
logo_holder.spacing = [300, 0]\n self.root_layout.add_widget(logo_holder)\n self.img_holder = BoxLayout(orientation='vertical', size=(200, 200),\n size_hint=(1, None))\n self.img_holder.padding = [0, 80, 0, 10]\n self.root_layout.add_widget(self.img_holder)\n self.progress_bar = ProgressBar()\n self.messages = BoxLayout(orientation='vertical')\n self.root_layout.add_widget(self.messages)\n self.root_layout.add_widget(buttons_holder)\n self.root_layout.add_widget(self.progress_bar)\n\n def disable_reload_bnt(self):\n self.content_reload_btn.disabled = True\n\n def get_root_Layout(self):\n return self.root_layout\n\n def add_messages(self, message):\n self.messages.add_widget(message)\n\n def remove_messages(self, message):\n self.messages.remove_widget(message)\n\n def add_loading_gif(self):\n self.gif_img = Image(source='loading.zip', anim_delay=0.15)\n self.img_holder.add_widget(self.gif_img)\n\n def remove_loading_gif(self):\n self.img_holder.remove_widget(self.gif_img)\n\n def start_progress_bar(self, anim_value):\n self.anim = Animation(value=anim_value, duration=3)\n self.anim.start(self.progress_bar)\n\n def animation_bind(self, bindFunction):\n self.anim.bind(on_complete=bindFunction)\n",
"step-5": "from kivy.uix.progressbar import ProgressBar\nfrom kivy.animation import Animation\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.uix.image import Image\nfrom kivy.graphics import Color, Rectangle\nfrom kivy.core.window import Window\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.core.window import Window\nfrom kivy.uix.dropdown import DropDown\nWindow.clearcolor = (1, 1, 1, 1)\n\nclass _BoxLayout(BoxLayout):\n\tdef __init__(self, **kwargs):\n\t\tsuper(_BoxLayout, self).__init__(**kwargs)\n\t\twith self.canvas.before:\n\t\t Color(0.878, 0.941, 0.784)\n\t\t self.rect = Rectangle(size=self.size, pos=self.pos)\n\t\tself.bind(size=self._update_rect, pos=self._update_rect)\n\n\tdef _update_rect(self, instance, value):\n\t\tself.rect.pos = instance.pos\n\t\tself.rect.size = instance.size\n\nclass KaliteUI(object):\n\tdef __init__(self, kaliteApp):\n\t\tdropdown = DropDown()\n\t\tdropdown_btn = Button(text='menu', size_hint_x=None, size_hint_y=None, size=(150, 40), font_size=18\n\t\t , color=(.06, .6, .2, 1), bold=True, background_color=(1, 1, 1, 0.2))\n\t\tdropdown_btn.bind(on_release=dropdown.open)\n\n\t\tself.root_layout = GridLayout(cols=1)\n\t\tlogo_holder = _BoxLayout(orientation='horizontal')\n\t\tlogo_img = Image(source='horizontal-logo.png', size_hint_x=None, width=360)\n\n\t\tlogo_holder.padding = [10,10,10,10]\n\t\tlogo_holder.add_widget(logo_img)\n\n\t\tself.content_reload_btn= Button(text='Reload Content', size_hint_x=None, size_hint_y=None, size=(150, 40), font_size=18\n\t\t , color=(1, 1, 1, 1), bold=True)\n\n\t\tself.content_reload_btn.bind(on_press=kaliteApp.reload_content)\n\t\tspace_holder = _BoxLayout(orientation='horizontal', pos_hint={'x': .8})\n\t\tlogo_holder.add_widget(space_holder)\n\n\t\tbuttons_holder = AnchorLayout(anchor_x='center', 
anchor_y='center')\n\n\t\tdropdown.add_widget(self.content_reload_btn)\n\t\tlogo_holder.add_widget(dropdown_btn)\n\t\tlogo_holder.spacing = [300, 0]\n\t\tself.root_layout.add_widget(logo_holder)\n\n\t\tself.img_holder = BoxLayout(orientation='vertical', size=(200,200), size_hint=(1, None))\n\t\tself.img_holder.padding = [0,80,0,10]\n\t\tself.root_layout.add_widget(self.img_holder)\n\n\t\tself.progress_bar = ProgressBar()\n\n\t\tself.messages = BoxLayout(orientation='vertical')\n\n\t\tself.root_layout.add_widget(self.messages)\n\t\tself.root_layout.add_widget(buttons_holder)\n\t\tself.root_layout.add_widget(self.progress_bar)\n\n\tdef disable_reload_bnt(self):\n\t\tself.content_reload_btn.disabled = True\n\n\tdef get_root_Layout(self):\n\t\treturn self.root_layout\n\n\tdef add_messages(self, message):\n\t\tself.messages.add_widget(message)\n\n\tdef remove_messages(self, message):\n\t\tself.messages.remove_widget(message)\n\n\tdef add_loading_gif(self):\n\t\tself.gif_img = Image(source='loading.zip', anim_delay = 0.15)\n\t\tself.img_holder.add_widget(self.gif_img)\n\n\tdef remove_loading_gif(self):\n\t\tself.img_holder.remove_widget(self.gif_img)\n\n\tdef start_progress_bar(self, anim_value):\n\t\tself.anim = Animation(value = anim_value, duration = 3)\n\t\tself.anim.start(self.progress_bar)\n\n\tdef animation_bind(self, bindFunction):\n\t\tself.anim.bind(on_complete = bindFunction)\n\n\n",
"step-ids": [
9,
11,
14,
15,
16
]
}
|
[
9,
11,
14,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(v1)
<|reserved_special_token_0|>
print(v2)
<|reserved_special_token_0|>
print(v3)
<|reserved_special_token_0|>
print(v4)
<|reserved_special_token_1|>
v1 = 3 + 4 * 2
print(v1)
v2 = (2 + 6) * 2
print(v2)
v3 = 2 ** 3 ** 2
print(v3)
v4 = 20 + 80 / 2
print(v4)
|
flexible
|
{
"blob_id": "e6694403eecf2c4511c1fce959b5939f5f457bb8",
"index": 9384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(v1)\n<mask token>\nprint(v2)\n<mask token>\nprint(v3)\n<mask token>\nprint(v4)\n",
"step-3": "v1 = 3 + 4 * 2\nprint(v1)\nv2 = (2 + 6) * 2\nprint(v2)\nv3 = 2 ** 3 ** 2\nprint(v3)\nv4 = 20 + 80 / 2\nprint(v4)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random
# library to create window in the terminal
import curses
# initialized curses by returning a window object
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
curses.curs_set(0)
height, width = stdscr.getmaxyx()
# create a new window of a given size
window = curses.newwin(height, width, 0, 0)
window.keypad(1)
window.timeout(100)
# snake's form
snk_x = width/4
snk_y = height/2
# initialize snake's size to 3
snake = [
[snk_y, snk_x],
[snk_y, snk_x-1],
[snk_y, snk_x-2]
]
# food's size
food = [height/2, width/2]
# add first food in the window
window.addch(int(food[0]), int(food[1]), curses.ACS_PI)
# snake initializes direction to right
key = curses.KEY_RIGHT
# main of snake game
while True:
next_key = window.getch()
key = key if next_key == -1 else next_key
if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0] in snake[1:]:
curses.endwin()
quit()
new_head = [snake[0][0], snake[0][1]]
if key == curses.KEY_DOWN:
new_head[0] += 1
if key == curses.KEY_UP:
new_head[0] -= 1
if key == curses.KEY_LEFT:
new_head[1] -= 1
if key == curses.KEY_RIGHT:
new_head[1] += 1
snake.insert(0, new_head)
if snake[0] == food:
food = None
while food is None:
nf = [ random.randint(1, height-1), random.randint(1, width-1)]
food = nf if nf not in snake else None
window.addch(food[0], food[1], curses.ACS_PI)
else:
tail = snake.pop()
window.addch(int(tail[0]), int(tail[1]), ' ')
window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)
|
normal
|
{
"blob_id": "153d37b58a10847aae1fa7dbec4c7576c3d97fb2",
"index": 3407,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\n<mask token>\nwindow.keypad(1)\nwindow.timeout(100)\n<mask token>\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\n<mask token>\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-3": "<mask token>\nstdscr = curses.initscr()\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\nheight, width = stdscr.getmaxyx()\nwindow = curses.newwin(height, width, 0, 0)\nwindow.keypad(1)\nwindow.timeout(100)\nsnk_x = width / 4\nsnk_y = height / 2\nsnake = [[snk_y, snk_x], [snk_y, snk_x - 1], [snk_y, snk_x - 2]]\nfood = [height / 2, width / 2]\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\nkey = curses.KEY_RIGHT\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-4": "import random\nimport curses\nstdscr = curses.initscr()\ncurses.noecho()\ncurses.cbreak()\nstdscr.keypad(True)\ncurses.curs_set(0)\nheight, width = stdscr.getmaxyx()\nwindow = curses.newwin(height, width, 0, 0)\nwindow.keypad(1)\nwindow.timeout(100)\nsnk_x = width / 4\nsnk_y = height / 2\nsnake = [[snk_y, snk_x], [snk_y, snk_x - 1], [snk_y, snk_x - 2]]\nfood = [height / 2, width / 2]\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\nkey = curses.KEY_RIGHT\nwhile True:\n next_key = window.getch()\n key = key if next_key == -1 else next_key\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0\n ] in snake[1:]:\n curses.endwin()\n quit()\n new_head = [snake[0][0], snake[0][1]]\n if key == curses.KEY_DOWN:\n new_head[0] += 1\n if key == curses.KEY_UP:\n new_head[0] -= 1\n if key == curses.KEY_LEFT:\n new_head[1] -= 1\n if key == curses.KEY_RIGHT:\n new_head[1] += 1\n snake.insert(0, new_head)\n if snake[0] == food:\n food = None\n while food is None:\n nf = [random.randint(1, height - 1), random.randint(1, width - 1)]\n food = nf if nf not in snake else None\n window.addch(food[0], food[1], curses.ACS_PI)\n else:\n tail = snake.pop()\n window.addch(int(tail[0]), int(tail[1]), ' ')\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\n",
"step-5": "import random\r\n\r\n# library to create window in the terminal\r\nimport curses \r\n\r\n# initialized curses by returning a window object\r\nstdscr = curses.initscr()\r\ncurses.noecho()\r\ncurses.cbreak()\r\nstdscr.keypad(True)\r\ncurses.curs_set(0)\r\nheight, width = stdscr.getmaxyx()\r\n\r\n# create a new window of a given size\r\nwindow = curses.newwin(height, width, 0, 0)\r\nwindow.keypad(1)\r\nwindow.timeout(100)\r\n\r\n# snake's form\r\nsnk_x = width/4\r\nsnk_y = height/2\r\n\r\n# initialize snake's size to 3\r\nsnake = [\r\n [snk_y, snk_x],\r\n [snk_y, snk_x-1],\r\n [snk_y, snk_x-2]\r\n]\r\n\r\n# food's size\r\nfood = [height/2, width/2]\r\n\r\n# add first food in the window\r\nwindow.addch(int(food[0]), int(food[1]), curses.ACS_PI)\r\n\r\n# snake initializes direction to right\r\nkey = curses.KEY_RIGHT\r\n\r\n# main of snake game \r\nwhile True:\r\n next_key = window.getch()\r\n key = key if next_key == -1 else next_key\r\n\r\n if snake[0][0] in [0, height] or snake[0][1] in [0, width] or snake[0] in snake[1:]:\r\n curses.endwin()\r\n quit()\r\n\r\n new_head = [snake[0][0], snake[0][1]]\r\n\r\n if key == curses.KEY_DOWN:\r\n new_head[0] += 1\r\n if key == curses.KEY_UP:\r\n new_head[0] -= 1\r\n if key == curses.KEY_LEFT:\r\n new_head[1] -= 1\r\n if key == curses.KEY_RIGHT:\r\n new_head[1] += 1\r\n\r\n snake.insert(0, new_head)\r\n\r\n if snake[0] == food:\r\n food = None\r\n while food is None:\r\n nf = [ random.randint(1, height-1), random.randint(1, width-1)]\r\n food = nf if nf not in snake else None\r\n window.addch(food[0], food[1], curses.ACS_PI)\r\n else:\r\n tail = snake.pop()\r\n window.addch(int(tail[0]), int(tail[1]), ' ')\r\n \r\n window.addch(int(snake[0][0]), int(snake[0][1]), curses.ACS_CKBOARD)\r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sys import exit
# Outside
def outside():
print """
Oyoiyoi ... The train isn't running due to the HVV being complete crap.
Well, the Rewe around the corner is still open
and there's a HSV bar around the corner.
You want to get your drank on, right now!
Where do you go?
"""
choice = raw_input("> ")
if choice == "Rewe":
print "Off to Rewe!"
Rewe()
elif choice == "HSV bar":
print "Off to the HSV bar!"
bar_sober()
else:
die()
# Outsid - Rewe
def Rewe():
print """
Ohhaohaoha...Rewe ist packed!
..and you're still sober!
What is this, gradeschool?
Forget about the beer, just get in line at the register
and grab whatever hard liquor is at kids' reach
(way to go Germany..). \n
\n
Alrgiht, now you're back outside
and your nipps are about to freeze off!
Where are you going to go now?
Time for the HSV bar,
or are you done with the world and want to just go home?
"""
choice = raw_input("> ")
if choice == "HSV bar":
print "To the HSV bar!"
way_bar()
elif choice == "go home":
print "Homeward bound!"
way_inside()
else:
die()
# Outside - Rewe - way to the bar
def way_bar():
print """
You managed to grab a box of some good ol' German Schnapps!
These 12 little babies are gonna get you in the right kind of mood.
You have about a 5 to 10 minute walk ahead of you..
How many mini bottles of goodness will you gulp down on the way?
"""
choice = raw_input("> ")
how_much = int(choice)
if how_much < 3:
bar_sober()
elif 6 > how_much >= 3:
bar_buzzed()
else:
loose("""
Well, I mean you did want to get wierd tonight,
but this just escalated far too quickly! You need
to get ahold of yourself! Now you've thrown your cookies
all over the sidewalk..Though I am a bit proud of you, you better just
go home and sleep..if you can find your way.
""")
# Outside - Rewe - way back home
def way_inside():
print """
You managed to grab a box of some good ol' German Schnapps!
These 12 little babies are gonna get you in the right kind of mood.
You have about a 5 to 10 minute walk ahead of you..
How many mini bottles of goodness will you gulp down on the way?
"""
choice = raw_input("> ")
how_much = int(choice)
if how_much < 3:
inside_sober()
elif 6 > how_much >= 3:
inside_buzzed()
else:
loose("""
Well, I mean you did want to get wierd tonight,
but this just escalated far too quickly! You need
to get ahold of yourself! Now you've thrown your cookies
all over the sidewalk..Though I am a bit proud of you, you better just
go home and sleep..if you can find your way.
""")
# Outside - Rewe - Inside(buzzed)
def inside_buzzed():
print """
Now you're a little buzzed and all warm in your humble abode!
You could kick it here with ya bad self, or ask if some peeps want to
come over.
Do you want to invite people to come get wrecked with you?
"""
choice = raw_input("> ")
if choice == "yes":
print "Okay, let's get this party goin'!"
inside_buzzed_invite()
elif choice == "no":
print "There's only enough liquor for numero uno."
inside_buzzed_alone()
else:
die()
# Outside - Rewe - Inside(buzzed) Invite
def inside_buzzed_invite():
print """
Congrats..
Both of your two friends are busy.
Well, so much for trying to be social!
Guess you'll be hanging out alone after all.
"""
inside_buzzed_alone()
# Outside - Rewe - Inside(buzzed) ALone
def inside_buzzed_alone():
print """
Now you're a little buzzed and all warm in your humble abode!
Time to watch episodes of 'Intervention'
and drink everytime someone makes a worse life choice
than you have!
"""
win("Yay for not being at the very bottom!")
# Inside (sober)
def inside_sober():
print """
Alright alright alright.
You could kick it here with ya bad self, or ask if some peeps want to
come over.
Do you want to invite people to come get wrecked with you?
"""
choice = raw_input("> ")
if choice == "yes":
print "It'll be nice to have some social interaction."
inside_sober_invite()
elif choice == "no":
print "Ew gross, people are icky."
inside_sober_alone()
else:
die()
# Inside (sober) invite
def inside_sober_invite():
print """
Wow you're feeling socially acceptable today!
Three people are now at your place and you don't have much alcohol.
Way to go, you anti-social worm.
You're not drunk enough to be entertaining!
You forgot you can't handle being responsible for social encounters.
Akwardness.
Overwhelms.
You!
"""
loose("You're an anxious mess.")
# Inside(sober) - Alone
def inside_sober_alone():
print """
Wohoo! Time to drink what you've got and play some sweet video games until your eyes bleed!
Who needs other people to enjoy themselves?
Being socially awkward rules!
And the best part is:
You don't have to wear pants!
"""
win("This is the (anti-social) life!")
# Outside - Rewe - bar(buzzed)
def bar_buzzed():
print """
On the way to the bar, you see the disco lights flashing
and you can here the German Schlager music being accompanied
by the voices of old people.
Nice.
The few bottles of liquor you drank are kicking in just in time!
You've consumed the perfect amount for this kind of thing!
Once you get in the bar everyone cheers, even though you don't know them!
Some old lady is celebrating the death of her husband and buying rounds
for everyone.
"""
win("You hit the party jackpot!")
# Outside - Bar(sober)
def bar_sober():
print """
So now you're inside, and people seem to be having a good time.
The problem is: they are drunk; you are not!
You then realize that you can't pay with card here
and you don't have enough cash for a drink..
Even if you brought booze with you, you wouldn't be able to
drink it in here. Way to go..
Because you're too sober to be socially acceptable, you can't
find the courage to ask the people celebrating if you can join.
"""
loose("You're uncomfortable and go home as the anxious mess that alaways had been.")
# End of game, added to the variable of "why"
def win(why):
print why, " Bitchin'."
exit(0)
def loose(why):
print why, " Laaame.."
exit (0)
def die():
print """
How dare you think out of the box?! You die sober!!
"""
# Begining of game
def start():
print """
It's Friday night and you want to get hammered!
Do you want to go out or stay home?
"""
choice = raw_input("> ")
if choice == "out":
outside()
elif choice == "stay home":
inside_sober()
else:
die()
start()
|
normal
|
{
"blob_id": "b3bace532f687edc966c6aef5f454bde9367204f",
"index": 4500,
"step-1": "from sys import exit\n\n# Outside\ndef outside():\n print \"\"\"\n Oyoiyoi ... The train isn't running due to the HVV being complete crap.\n Well, the Rewe around the corner is still open\n and there's a HSV bar around the corner.\n You want to get your drank on, right now!\n Where do you go?\n \"\"\"\n choice = raw_input(\"> \")\n if choice == \"Rewe\":\n print \"Off to Rewe!\"\n Rewe()\n elif choice == \"HSV bar\":\n print \"Off to the HSV bar!\"\n bar_sober()\n else:\n die()\n\n# Outsid - Rewe\ndef Rewe():\n print \"\"\"\n Ohhaohaoha...Rewe ist packed!\n ..and you're still sober!\n What is this, gradeschool?\n Forget about the beer, just get in line at the register\n and grab whatever hard liquor is at kids' reach\n (way to go Germany..). \\n\n \\n\n Alrgiht, now you're back outside\n and your nipps are about to freeze off!\n Where are you going to go now?\n Time for the HSV bar,\n or are you done with the world and want to just go home?\n \"\"\"\n choice = raw_input(\"> \")\n if choice == \"HSV bar\":\n print \"To the HSV bar!\"\n way_bar()\n elif choice == \"go home\":\n print \"Homeward bound!\"\n way_inside()\n else:\n die()\n\n# Outside - Rewe - way to the bar\ndef way_bar():\n print \"\"\"\n You managed to grab a box of some good ol' German Schnapps!\n These 12 little babies are gonna get you in the right kind of mood.\n You have about a 5 to 10 minute walk ahead of you..\n How many mini bottles of goodness will you gulp down on the way?\n \"\"\"\n choice = raw_input(\"> \")\n how_much = int(choice)\n\n if how_much < 3:\n bar_sober()\n elif 6 > how_much >= 3:\n bar_buzzed()\n else:\n loose(\"\"\"\n Well, I mean you did want to get wierd tonight,\n but this just escalated far too quickly! You need\n to get ahold of yourself! 
Now you've thrown your cookies\n all over the sidewalk..Though I am a bit proud of you, you better just\n go home and sleep..if you can find your way.\n \"\"\")\n\n# Outside - Rewe - way back home\ndef way_inside():\n print \"\"\"\n You managed to grab a box of some good ol' German Schnapps!\n These 12 little babies are gonna get you in the right kind of mood.\n You have about a 5 to 10 minute walk ahead of you..\n How many mini bottles of goodness will you gulp down on the way?\n \"\"\"\n choice = raw_input(\"> \")\n how_much = int(choice)\n\n if how_much < 3:\n inside_sober()\n elif 6 > how_much >= 3:\n inside_buzzed()\n else:\n loose(\"\"\"\n Well, I mean you did want to get wierd tonight,\n but this just escalated far too quickly! You need\n to get ahold of yourself! Now you've thrown your cookies\n all over the sidewalk..Though I am a bit proud of you, you better just\n go home and sleep..if you can find your way.\n \"\"\")\n\n# Outside - Rewe - Inside(buzzed)\ndef inside_buzzed():\n print \"\"\"\n Now you're a little buzzed and all warm in your humble abode!\n You could kick it here with ya bad self, or ask if some peeps want to\n come over.\n Do you want to invite people to come get wrecked with you?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"yes\":\n print \"Okay, let's get this party goin'!\"\n inside_buzzed_invite()\n elif choice == \"no\":\n print \"There's only enough liquor for numero uno.\"\n inside_buzzed_alone()\n else:\n die()\n\n\n# Outside - Rewe - Inside(buzzed) Invite\ndef inside_buzzed_invite():\n print \"\"\"\n Congrats..\n Both of your two friends are busy.\n Well, so much for trying to be social!\n Guess you'll be hanging out alone after all.\n \"\"\"\n inside_buzzed_alone()\n\n# Outside - Rewe - Inside(buzzed) ALone\ndef inside_buzzed_alone():\n print \"\"\"\n Now you're a little buzzed and all warm in your humble abode!\n Time to watch episodes of 'Intervention'\n and drink everytime someone makes a worse life choice\n than 
you have!\n \"\"\"\n win(\"Yay for not being at the very bottom!\")\n\n# Inside (sober)\ndef inside_sober():\n print \"\"\"\n Alright alright alright.\n You could kick it here with ya bad self, or ask if some peeps want to\n come over.\n Do you want to invite people to come get wrecked with you?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"yes\":\n print \"It'll be nice to have some social interaction.\"\n inside_sober_invite()\n elif choice == \"no\":\n print \"Ew gross, people are icky.\"\n inside_sober_alone()\n else:\n die()\n\n# Inside (sober) invite\ndef inside_sober_invite():\n print \"\"\"\n Wow you're feeling socially acceptable today!\n Three people are now at your place and you don't have much alcohol.\n Way to go, you anti-social worm.\n You're not drunk enough to be entertaining!\n You forgot you can't handle being responsible for social encounters.\n Akwardness.\n Overwhelms.\n You!\n \"\"\"\n loose(\"You're an anxious mess.\")\n\n# Inside(sober) - Alone\ndef inside_sober_alone():\n print \"\"\"\n Wohoo! 
Time to drink what you've got and play some sweet video games until your eyes bleed!\n Who needs other people to enjoy themselves?\n Being socially awkward rules!\n And the best part is:\n You don't have to wear pants!\n \"\"\"\n win(\"This is the (anti-social) life!\")\n\n\n# Outside - Rewe - bar(buzzed)\ndef bar_buzzed():\n print \"\"\"\n On the way to the bar, you see the disco lights flashing\n and you can here the German Schlager music being accompanied\n by the voices of old people.\n Nice.\n The few bottles of liquor you drank are kicking in just in time!\n You've consumed the perfect amount for this kind of thing!\n Once you get in the bar everyone cheers, even though you don't know them!\n Some old lady is celebrating the death of her husband and buying rounds\n for everyone.\n \"\"\"\n win(\"You hit the party jackpot!\")\n\n# Outside - Bar(sober)\ndef bar_sober():\n print \"\"\"\n So now you're inside, and people seem to be having a good time.\n The problem is: they are drunk; you are not!\n You then realize that you can't pay with card here\n and you don't have enough cash for a drink..\n Even if you brought booze with you, you wouldn't be able to\n drink it in here. Way to go..\n Because you're too sober to be socially acceptable, you can't\n find the courage to ask the people celebrating if you can join.\n \"\"\"\n loose(\"You're uncomfortable and go home as the anxious mess that alaways had been.\")\n\n\n# End of game, added to the variable of \"why\"\ndef win(why):\n print why, \" Bitchin'.\"\n exit(0)\n\ndef loose(why):\n print why, \" Laaame..\"\n exit (0)\n\ndef die():\n print \"\"\"\n How dare you think out of the box?! You die sober!!\n \"\"\"\n\n\n# Begining of game\ndef start():\n print \"\"\"\n It's Friday night and you want to get hammered!\n Do you want to go out or stay home?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"out\":\n outside()\n elif choice == \"stay home\":\n inside_sober()\n else:\n die()\n\nstart()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class StudentDebtsController:
def __init__(self, master, model, view):
self._master = master
self._model = model
self._view = view
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StudentDebtsController:
def __init__(self, master, model, view):
self._master = master
self._model = model
self._view = view
<|reserved_special_token_0|>
def GetStudentsInfo(self, text):
studentsList = self._model.GetStudentsList(text)
return studentsList
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StudentDebtsController:
def __init__(self, master, model, view):
self._master = master
self._model = model
self._view = view
def BackToAdminPage(self):
from Views.AdminPage import AdminPage
self._master.switch_frame(AdminPage, AdminPageModel)
def GetStudentsInfo(self, text):
studentsList = self._model.GetStudentsList(text)
return studentsList
def GetStudentDebtsAndShowTable(self, text):
self._view.HideUserInfo()
if not text:
self._view.ClearTable()
self._view.ShowNoDataLabelWithText(
'No issues found. Select user first.')
else:
self._view.ClearTable()
info = self._model.GetStudentInfo(text)
self._view.ShowUserInfo(info[0], info[1], info[2], info[3])
studentDebts = self._model.GetStudentDebts(text)
if len(studentDebts) > 0:
self._view.HideNoDataLabel()
else:
self._view.ShowNoDataLabelWithText(
"Student don't have issues yet.")
self._view.FillTable(studentDebts)
def ReturnBooks(self, idCopies):
if len(idCopies) > 0:
try:
id = self._model.GetStudentId(idCopies[0])
self._model.ReturnBooks(idCopies)
self._view.ClearTable()
studentDebts = self._model.GetStudentDebts(id)
self._view.FillTable(studentDebts)
singOrPlural = 'Book'
if len(idCopies) > 1:
singOrPlural = 'Books'
self._view.SetMessageLabel(singOrPlural +
' successfully returned', 'green')
except Exception as e:
print(e)
self._view.SetMessageLabel('Something went wrong', 'red')
else:
self._view.SetMessageLabel(
'0 books have been selected. Nothing to return', 'red')
<|reserved_special_token_1|>
from Models.AdminPageModel import AdminPageModel
class StudentDebtsController:
def __init__(self, master, model, view):
self._master = master
self._model = model
self._view = view
def BackToAdminPage(self):
from Views.AdminPage import AdminPage
self._master.switch_frame(AdminPage, AdminPageModel)
def GetStudentsInfo(self, text):
studentsList = self._model.GetStudentsList(text)
return studentsList
def GetStudentDebtsAndShowTable(self, text):
self._view.HideUserInfo()
if not text:
self._view.ClearTable()
self._view.ShowNoDataLabelWithText(
'No issues found. Select user first.')
else:
self._view.ClearTable()
info = self._model.GetStudentInfo(text)
self._view.ShowUserInfo(info[0], info[1], info[2], info[3])
studentDebts = self._model.GetStudentDebts(text)
if len(studentDebts) > 0:
self._view.HideNoDataLabel()
else:
self._view.ShowNoDataLabelWithText(
"Student don't have issues yet.")
self._view.FillTable(studentDebts)
def ReturnBooks(self, idCopies):
if len(idCopies) > 0:
try:
id = self._model.GetStudentId(idCopies[0])
self._model.ReturnBooks(idCopies)
self._view.ClearTable()
studentDebts = self._model.GetStudentDebts(id)
self._view.FillTable(studentDebts)
singOrPlural = 'Book'
if len(idCopies) > 1:
singOrPlural = 'Books'
self._view.SetMessageLabel(singOrPlural +
' successfully returned', 'green')
except Exception as e:
print(e)
self._view.SetMessageLabel('Something went wrong', 'red')
else:
self._view.SetMessageLabel(
'0 books have been selected. Nothing to return', 'red')
<|reserved_special_token_1|>
from Models.AdminPageModel import AdminPageModel
class StudentDebtsController:
    """Controller for the student-debts screen."""

    def __init__(self, master, model, view):
        self._master = master
        self._model = model
        self._view = view

    def BackToAdminPage(self):
        # Local import avoids a circular dependency with the views package.
        from Views.AdminPage import AdminPage
        self._master.switch_frame(AdminPage, AdminPageModel)

    def GetStudentsInfo(self, text):
        # Plain pass-through to the model lookup.
        return self._model.GetStudentsList(text)

    def GetStudentDebtsAndShowTable(self, text):
        """Fill the table with the debts of the selected student."""
        self._view.HideUserInfo()
        self._view.ClearTable()
        if not text:
            self._view.ShowNoDataLabelWithText("No issues found. Select user first.")
            return
        details = self._model.GetStudentInfo(text)
        self._view.ShowUserInfo(details[0], details[1], details[2], details[3])
        debts = self._model.GetStudentDebts(text)
        if debts:
            self._view.HideNoDataLabel()
        else:
            self._view.ShowNoDataLabelWithText("Student don't have issues yet.")
        self._view.FillTable(debts)

    def ReturnBooks(self, idCopies):
        """Return the selected copies and report the outcome in the view."""
        if not idCopies:
            self._view.SetMessageLabel("0 books have been selected. Nothing to return", "red")
            return
        try:
            owner = self._model.GetStudentId(idCopies[0])
            self._model.ReturnBooks(idCopies)
            self._view.ClearTable()
            self._view.FillTable(self._model.GetStudentDebts(owner))
            noun = "Books" if len(idCopies) > 1 else "Book"
            self._view.SetMessageLabel(noun + " successfully returned", "green")
        except Exception as error:
            print(error)
            self._view.SetMessageLabel("Something went wrong", "red")
|
flexible
|
{
"blob_id": "8aacc8dbfdd70d24689ae17b9c29b1ffc80fb231",
"index": 9013,
"step-1": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n <mask token>\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if not text:\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\n 'No issues found. Select user first.')\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if len(studentDebts) > 0:\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\n \"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n def ReturnBooks(self, idCopies):\n if len(idCopies) > 0:\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if len(idCopies) > 1:\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural +\n ' successfully returned', 'green')\n except Exception as e:\n print(e)\n self._view.SetMessageLabel('Something went wrong', 'red')\n else:\n self._view.SetMessageLabel(\n '0 books have been selected. Nothing to return', 'red')\n",
"step-4": "from Models.AdminPageModel import AdminPageModel\n\n\nclass StudentDebtsController:\n\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if not text:\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\n 'No issues found. Select user first.')\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if len(studentDebts) > 0:\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\n \"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n def ReturnBooks(self, idCopies):\n if len(idCopies) > 0:\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if len(idCopies) > 1:\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural +\n ' successfully returned', 'green')\n except Exception as e:\n print(e)\n self._view.SetMessageLabel('Something went wrong', 'red')\n else:\n self._view.SetMessageLabel(\n '0 books have been selected. Nothing to return', 'red')\n",
"step-5": "\nfrom Models.AdminPageModel import AdminPageModel\n\nclass StudentDebtsController:\n def __init__(self, master, model, view):\n self._master = master\n self._model = model\n self._view = view\n\n\n def BackToAdminPage(self):\n from Views.AdminPage import AdminPage\n self._master.switch_frame(AdminPage, AdminPageModel)\n\n def GetStudentsInfo(self, text):\n studentsList = self._model.GetStudentsList(text)\n return studentsList\n\n def GetStudentDebtsAndShowTable(self, text):\n self._view.HideUserInfo()\n if (not text):\n self._view.ClearTable()\n self._view.ShowNoDataLabelWithText(\"No issues found. Select user first.\")\n else:\n self._view.ClearTable()\n info = self._model.GetStudentInfo(text)\n self._view.ShowUserInfo(info[0], info[1], info[2], info[3])\n studentDebts = self._model.GetStudentDebts(text)\n if (len(studentDebts) > 0):\n self._view.HideNoDataLabel()\n else:\n self._view.ShowNoDataLabelWithText(\"Student don't have issues yet.\")\n self._view.FillTable(studentDebts)\n\n\n def ReturnBooks(self, idCopies):\n if (len(idCopies) > 0):\n try:\n id = self._model.GetStudentId(idCopies[0])\n self._model.ReturnBooks(idCopies)\n self._view.ClearTable()\n studentDebts = self._model.GetStudentDebts(id)\n self._view.FillTable(studentDebts)\n singOrPlural = 'Book'\n if (len(idCopies) > 1):\n singOrPlural = 'Books'\n self._view.SetMessageLabel(singOrPlural + \" successfully returned\" , \"green\")\n except Exception as e:\n print(e)\n self._view.SetMessageLabel(\"Something went wrong\", \"red\")\n else:\n self._view.SetMessageLabel(\"0 books have been selected. Nothing to return\", \"red\")\n\n ",
"step-ids": [
2,
3,
6,
7,
8
]
}
|
[
2,
3,
6,
7,
8
] |
<|reserved_special_token_0|>
class Tarefas(Screen):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Tarefa(BoxLayout):
def __init__(self, text='', **kwargs):
super().__init__(**kwargs)
self.ids.label.text = text
class Test(App):
def build(self):
return Gerenciador()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Menu(Screen):
    # Main menu screen; its layout is declared entirely in the kv file.
    pass
class Tarefas(Screen):
    """Screen listing task widgets, with an input to append new ones."""

    def __init__(self, tarefas=None, **kwargs):
        """Create the screen, adding one Tarefa row per text in *tarefas*.

        The default used to be a mutable ``[]``, which Python shares
        across calls; ``None`` as sentinel avoids that pitfall while
        remaining backward compatible.
        """
        super().__init__(**kwargs)
        if tarefas is None:
            tarefas = []
        for tarefa in tarefas:
            self.ids.box.add_widget(Tarefa(text=tarefa))

    def addWidget(self):
        """Append a Tarefa built from the text field, then clear the field."""
        texto = self.ids.texto.text
        self.ids.box.add_widget(Tarefa(text=texto))
        self.ids.texto.text = ''
class Tarefa(BoxLayout):
    # One row in the task list; the 'label' child is defined in the kv rule.

    def __init__(self, text='', **kwargs):
        super().__init__(**kwargs)
        # Push the task text into the label declared in the kv file.
        self.ids.label.text = text
class Test(App):

    def build(self):
        # The ScreenManager subclass is the root widget of the app.
        return Gerenciador()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Gerenciador(ScreenManager):
    # Root screen manager; concrete screens are wired up in the kv file.
    pass
class Menu(Screen):
    # Main menu screen; its layout is declared entirely in the kv file.
    pass
class Tarefas(Screen):
    """Screen listing task widgets, with an input to append new ones."""

    def __init__(self, tarefas=None, **kwargs):
        """Create the screen, adding one Tarefa row per text in *tarefas*.

        The default used to be a mutable ``[]``, which Python shares
        across calls; ``None`` as sentinel avoids that pitfall while
        remaining backward compatible.
        """
        super().__init__(**kwargs)
        if tarefas is None:
            tarefas = []
        for tarefa in tarefas:
            self.ids.box.add_widget(Tarefa(text=tarefa))

    def addWidget(self):
        """Append a Tarefa built from the text field, then clear the field."""
        texto = self.ids.texto.text
        self.ids.box.add_widget(Tarefa(text=texto))
        self.ids.texto.text = ''
class Tarefa(BoxLayout):
    # One row in the task list; the 'label' child is defined in the kv rule.

    def __init__(self, text='', **kwargs):
        super().__init__(**kwargs)
        # Push the task text into the label declared in the kv file.
        self.ids.label.text = text
class Test(App):

    def build(self):
        # The ScreenManager subclass is the root widget of the app.
        return Gerenciador()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
class Gerenciador(ScreenManager):
    # Root screen manager; concrete screens are wired up in the kv file.
    pass
class Menu(Screen):
    # Main menu screen; its layout is declared entirely in the kv file.
    pass
class Tarefas(Screen):
    """Screen listing task widgets, with an input to append new ones."""

    def __init__(self, tarefas=None, **kwargs):
        """Create the screen, adding one Tarefa row per text in *tarefas*.

        The default used to be a mutable ``[]``, which Python shares
        across calls; ``None`` as sentinel avoids that pitfall while
        remaining backward compatible.
        """
        super().__init__(**kwargs)
        if tarefas is None:
            tarefas = []
        for tarefa in tarefas:
            self.ids.box.add_widget(Tarefa(text=tarefa))

    def addWidget(self):
        """Append a Tarefa built from the text field, then clear the field."""
        texto = self.ids.texto.text
        self.ids.box.add_widget(Tarefa(text=texto))
        self.ids.texto.text = ''
class Tarefa(BoxLayout):
    # One row in the task list; the 'label' child is defined in the kv rule.

    def __init__(self, text='', **kwargs):
        super().__init__(**kwargs)
        # Push the task text into the label declared in the kv file.
        self.ids.label.text = text
class Test(App):

    def build(self):
        # The ScreenManager subclass is the root widget of the app.
        return Gerenciador()


Test().run()  # start the Kivy event loop
|
flexible
|
{
"blob_id": "66b42791325a53172d4514cdd16ccd58d4edb186",
"index": 2409,
"step-1": "<mask token>\n\n\nclass Tarefas(Screen):\n <mask token>\n <mask token>\n\n\nclass Tarefa(BoxLayout):\n\n def __init__(self, text='', **kwargs):\n super().__init__(**kwargs)\n self.ids.label.text = text\n\n\nclass Test(App):\n\n def build(self):\n return Gerenciador()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Menu(Screen):\n pass\n\n\nclass Tarefas(Screen):\n\n def __init__(self, tarefas=[], **kwargs):\n super().__init__(**kwargs)\n for tarefa in tarefas:\n self.ids.box.add_widget(Tarefa(text=tarefa))\n\n def addWidget(self):\n texto = self.ids.texto.text\n self.ids.box.add_widget(Tarefa(text=texto))\n self.ids.texto.text = ''\n\n\nclass Tarefa(BoxLayout):\n\n def __init__(self, text='', **kwargs):\n super().__init__(**kwargs)\n self.ids.label.text = text\n\n\nclass Test(App):\n\n def build(self):\n return Gerenciador()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Gerenciador(ScreenManager):\n pass\n\n\nclass Menu(Screen):\n pass\n\n\nclass Tarefas(Screen):\n\n def __init__(self, tarefas=[], **kwargs):\n super().__init__(**kwargs)\n for tarefa in tarefas:\n self.ids.box.add_widget(Tarefa(text=tarefa))\n\n def addWidget(self):\n texto = self.ids.texto.text\n self.ids.box.add_widget(Tarefa(text=texto))\n self.ids.texto.text = ''\n\n\nclass Tarefa(BoxLayout):\n\n def __init__(self, text='', **kwargs):\n super().__init__(**kwargs)\n self.ids.label.text = text\n\n\nclass Test(App):\n\n def build(self):\n return Gerenciador()\n\n\n<mask token>\n",
"step-4": "from kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\n\nclass Gerenciador(ScreenManager):\n pass\n\n\nclass Menu(Screen):\n pass\n\n\nclass Tarefas(Screen):\n\n def __init__(self, tarefas=[], **kwargs):\n super().__init__(**kwargs)\n for tarefa in tarefas:\n self.ids.box.add_widget(Tarefa(text=tarefa))\n\n def addWidget(self):\n texto = self.ids.texto.text\n self.ids.box.add_widget(Tarefa(text=texto))\n self.ids.texto.text = ''\n\n\nclass Tarefa(BoxLayout):\n\n def __init__(self, text='', **kwargs):\n super().__init__(**kwargs)\n self.ids.label.text = text\n\n\nclass Test(App):\n\n def build(self):\n return Gerenciador()\n\n\nTest().run()\n",
"step-5": null,
"step-ids": [
5,
8,
9,
11
]
}
|
[
5,
8,
9,
11
] |
<|reserved_special_token_0|>
class BaseConnection(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
<|reserved_special_token_0|>
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
<|reserved_special_token_0|>
def __enter__(self):
return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
    """
    This (now legacy) class, is the way ``execnet`` does its remote module
    execution: it sends it over a channel, and does a send/receive for
    exchanging information. This only works when there is native support in
    execnet for a given connection. This currently means it would only work for
    ssh and local (Popen) connections, and will not work for anything like
    kubernetes or containers.
    """

    def __init__(self, gateway, module, logger=None):
        # Ship the module to the remote side once; calls are then sent over
        # the resulting channel as "name(args)" strings.
        self.channel = gateway.remote_exec(module)
        self.module = module
        self.logger = logger

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            arguments = self._convert_args(args)
            if docstring and self.logger:
                self.logger.debug(docstring)
            self.channel.send('%s(%s)' % (name, arguments))
            try:
                return self.channel.receive()
            except Exception as error:
                # Surface only the last non-empty line of the remote
                # traceback as a local RuntimeError.
                exc_line = str(error)
                for tb_line in reversed(str(error).split('\n')):
                    if tb_line:
                        exc_line = tb_line
                        break
                raise RuntimeError(exc_line)
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: the Python 2 attribute was ``func_doc``; Python 3
        # functions only have ``__doc__``, so the old lookup always raised
        # AttributeError and silently returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''

    def _convert_args(self, args):
        """Render a tuple of positional args as remote call-argument text."""
        if args:
            if len(args) > 1:
                arguments = str(args).rstrip(')').lstrip('(')
            else:
                # Single-element tuples need the trailing comma stripped too.
                arguments = str(args).rstrip(',)').lstrip('(')
        else:
            arguments = ''
        return arguments
<|reserved_special_token_0|>
class JsonModuleExecute(object):
    """
    This remote execution class allows to ship Python code over to the remote
    node, load it via ``stdin`` and call any function with arguments. The
    resulting response is dumped over JSON so that it can get printed to
    ``stdout``, then captured locally, loaded into regular Python and returned.
    If the remote end generates an exception with a traceback, that is captured
    as well and raised accordingly.
    """

    def __init__(self, conn, module, logger=None, python_executable=None):
        self.conn = conn
        self.module = module
        # Full source text of the module; shipped to the remote python's stdin.
        self._module_source = inspect.getsource(module)
        self.logger = logger
        self.python_executable = python_executable

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            if docstring and self.logger:
                self.logger.debug(docstring)
            # Append a small __main__ snippet (dump_template) that calls the
            # requested function and JSON-dumps its return value/exception.
            if len(args):
                source = self._module_source + dump_template % (name, repr(
                    args))
            else:
                source = self._module_source + dump_template % (name, '()')
            if self.python_executable is None:
                self.python_executable = get_python_executable(self.conn)
            out, err, code = check(self.conn, [self.python_executable],
                stdin=source.encode('utf-8'))
            if not out:
                if not err:
                    # Fabricate a minimal traceback when nothing came back.
                    err = ['Traceback (most recent call last):',
                        '  File "<stdin>", in <module>',
                        'Exception: error calling "%s"' % name]
                if code:
                    raise Exception('Unexpected remote exception: \n%s\n%s' %
                        ('\n'.join(out), '\n'.join(err)))
                return
            response = json.loads(out[0])
            if response['exception']:
                raise Exception(response['exception'])
            return response['return']
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: Python 2's ``func_doc`` attribute does not exist on
        # Python 3 functions, so the old lookup always returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseConnection(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
interpreter)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
    """
    This (now legacy) class, is the way ``execnet`` does its remote module
    execution: it sends it over a channel, and does a send/receive for
    exchanging information. This only works when there is native support in
    execnet for a given connection. This currently means it would only work for
    ssh and local (Popen) connections, and will not work for anything like
    kubernetes or containers.
    """

    def __init__(self, gateway, module, logger=None):
        # Ship the module to the remote side once; calls are then sent over
        # the resulting channel as "name(args)" strings.
        self.channel = gateway.remote_exec(module)
        self.module = module
        self.logger = logger

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            arguments = self._convert_args(args)
            if docstring and self.logger:
                self.logger.debug(docstring)
            self.channel.send('%s(%s)' % (name, arguments))
            try:
                return self.channel.receive()
            except Exception as error:
                # Surface only the last non-empty line of the remote
                # traceback as a local RuntimeError.
                exc_line = str(error)
                for tb_line in reversed(str(error).split('\n')):
                    if tb_line:
                        exc_line = tb_line
                        break
                raise RuntimeError(exc_line)
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: the Python 2 attribute was ``func_doc``; Python 3
        # functions only have ``__doc__``, so the old lookup always raised
        # AttributeError and silently returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''

    def _convert_args(self, args):
        """Render a tuple of positional args as remote call-argument text."""
        if args:
            if len(args) > 1:
                arguments = str(args).rstrip(')').lstrip('(')
            else:
                # Single-element tuples need the trailing comma stripped too.
                arguments = str(args).rstrip(',)').lstrip('(')
        else:
            arguments = ''
        return arguments
<|reserved_special_token_0|>
class JsonModuleExecute(object):
    """
    This remote execution class allows to ship Python code over to the remote
    node, load it via ``stdin`` and call any function with arguments. The
    resulting response is dumped over JSON so that it can get printed to
    ``stdout``, then captured locally, loaded into regular Python and returned.
    If the remote end generates an exception with a traceback, that is captured
    as well and raised accordingly.
    """

    def __init__(self, conn, module, logger=None, python_executable=None):
        self.conn = conn
        self.module = module
        # Full source text of the module; shipped to the remote python's stdin.
        self._module_source = inspect.getsource(module)
        self.logger = logger
        self.python_executable = python_executable

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            if docstring and self.logger:
                self.logger.debug(docstring)
            # Append a small __main__ snippet (dump_template) that calls the
            # requested function and JSON-dumps its return value/exception.
            if len(args):
                source = self._module_source + dump_template % (name, repr(
                    args))
            else:
                source = self._module_source + dump_template % (name, '()')
            if self.python_executable is None:
                self.python_executable = get_python_executable(self.conn)
            out, err, code = check(self.conn, [self.python_executable],
                stdin=source.encode('utf-8'))
            if not out:
                if not err:
                    # Fabricate a minimal traceback when nothing came back.
                    err = ['Traceback (most recent call last):',
                        '  File "<stdin>", in <module>',
                        'Exception: error calling "%s"' % name]
                if code:
                    raise Exception('Unexpected remote exception: \n%s\n%s' %
                        ('\n'.join(out), '\n'.join(err)))
                return
            response = json.loads(out[0])
            if response['exception']:
                raise Exception(response['exception'])
            return response['return']
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: Python 2's ``func_doc`` attribute does not exist on
        # Python 3 functions, so the old lookup always returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseConnection(object):
    """
    Base class for Connection objects. Provides a generic interface to execnet
    for setting up the connection
    """
    # Path/name of the remote executable; empty here, subclasses may override.
    executable = ''
    # Chooses the mechanism used by import_module(): 'legacy' uses execnet
    # channels, 'json' uses the stdin/stdout JSON protocol.
    remote_import_system = 'legacy'

    def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
        True, detect_sudo=False, use_ssh=False, interpreter=None,
        ssh_options=None):
        # NOTE(review): 'threads' is accepted but never stored or used in the
        # code visible here.
        self.sudo = sudo
        self.hostname = hostname
        self.ssh_options = ssh_options
        self.logger = logger or basic_remote_logger()
        self.remote_module = None
        self.channel = None
        self.use_ssh = use_ssh
        self.global_timeout = None
        # Default remote interpreter matches this process' major version,
        # e.g. 'python3'.
        self.interpreter = interpreter or 'python%s' % sys.version_info[0]
        if eager:
            try:
                if detect_sudo:
                    # May overwrite the caller-supplied 'sudo' flag with the
                    # detected value.
                    self.sudo = self._detect_sudo()
                self.gateway = self._make_gateway(hostname)
            except OSError:
                self.logger.error(
                    "Can't communicate with remote host, possibly because %s is not installed there"
                     % self.interpreter)
                raise

    def _make_gateway(self, hostname):
        """Create and return an execnet gateway for *hostname*."""
        # Keep the Group so exit()/__exit__ can terminate all gateways later.
        self.group = execnet.Group()
        gateway = self.group.makegateway(self._make_connection_string(hostname)
            )
        gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
        return gateway

    def _detect_sudo(self, _execnet=None):
        """
        ``sudo`` detection has to create a different connection to the remote
        host so that we can reliably ensure that ``getuser()`` will return the
        right information.

        After getting the user info it closes the connection and returns
        a boolean
        """
        # '_execnet' exists so tests can inject a fake execnet module.
        exc = _execnet or execnet
        gw = exc.makegateway(self._make_connection_string(self.hostname,
            use_sudo=False))
        channel = gw.remote_exec(
            'import getpass; channel.send(getpass.getuser())')
        result = channel.receive()
        gw.exit()
        if result == 'root':
            # Already root: no sudo needed.
            return False
        self.logger.debug('connection detected need for sudo')
        return True

    def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
        ):
        """Build the execnet spec string ('ssh=...' or 'popen//...') for *hostname*."""
        _needs_ssh = _needs_ssh or needs_ssh
        interpreter = self.interpreter
        # An explicit 'use_sudo' argument overrides the instance setting.
        if use_sudo is not None:
            if use_sudo:
                interpreter = 'sudo ' + interpreter
        elif self.sudo:
            interpreter = 'sudo ' + interpreter
        if _needs_ssh(hostname) or self.use_ssh:
            if self.ssh_options:
                return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
                    interpreter)
            else:
                return 'ssh=%s//python=%s' % (hostname, interpreter)
        # Local host: plain subprocess (popen) connection.
        return 'popen//python=%s' % interpreter

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Terminate all gateways on context exit; never suppress exceptions.
        self.group.terminate(timeout=1.0)
        return False

    def cmd(self, cmd):
        """
        In the base connection class, this method just returns the ``cmd``
        as-is. Other implementations will end up doing transformations to the
        command by prefixing it with other flags needed. See
        :class:`KubernetesConnection` for an example
        """
        return cmd

    def execute(self, function, **kw):
        """Remotely execute *function* via the gateway, returning a channel."""
        return self.gateway.remote_exec(function, **kw)

    def exit(self):
        """Terminate every gateway in this connection's group."""
        self.group.terminate(timeout=1.0)

    def import_module(self, module, python_executable=None):
        """
        Allows remote execution of a local module. Depending on the
        ``remote_import_system`` attribute it may use execnet's implementation
        or remoto's own based on JSON.

        .. note:: It is not possible to use execnet's remote execution model on
        connections that aren't SSH or Local.
        """
        if self.remote_import_system is not None:
            if self.remote_import_system == 'json':
                self.remote_module = JsonModuleExecute(self, module, self.
                    logger, python_executable=python_executable)
            else:
                self.remote_module = LegacyModuleExecute(self.gateway,
                    module, self.logger)
        else:
            # No import system configured: fall back to the legacy mechanism.
            self.remote_module = LegacyModuleExecute(self.gateway, module,
                self.logger)
        return self.remote_module

    def has_connection(self):
        """Return True when the gateway exists and still has a receiver."""
        if self.gateway:
            return self.gateway.hasreceiver()
        return False
class LegacyModuleExecute(object):
    """
    This (now legacy) class, is the way ``execnet`` does its remote module
    execution: it sends it over a channel, and does a send/receive for
    exchanging information. This only works when there is native support in
    execnet for a given connection. This currently means it would only work for
    ssh and local (Popen) connections, and will not work for anything like
    kubernetes or containers.
    """

    def __init__(self, gateway, module, logger=None):
        # Ship the module to the remote side once; calls are then sent over
        # the resulting channel as "name(args)" strings.
        self.channel = gateway.remote_exec(module)
        self.module = module
        self.logger = logger

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            arguments = self._convert_args(args)
            if docstring and self.logger:
                self.logger.debug(docstring)
            self.channel.send('%s(%s)' % (name, arguments))
            try:
                return self.channel.receive()
            except Exception as error:
                # Surface only the last non-empty line of the remote
                # traceback as a local RuntimeError.
                exc_line = str(error)
                for tb_line in reversed(str(error).split('\n')):
                    if tb_line:
                        exc_line = tb_line
                        break
                raise RuntimeError(exc_line)
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: the Python 2 attribute was ``func_doc``; Python 3
        # functions only have ``__doc__``, so the old lookup always raised
        # AttributeError and silently returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''

    def _convert_args(self, args):
        """Render a tuple of positional args as remote call-argument text."""
        if args:
            if len(args) > 1:
                arguments = str(args).rstrip(')').lstrip('(')
            else:
                # Single-element tuples need the trailing comma stripped too.
                arguments = str(args).rstrip(',)').lstrip('(')
        else:
            arguments = ''
        return arguments
<|reserved_special_token_0|>
class JsonModuleExecute(object):
    """
    This remote execution class allows to ship Python code over to the remote
    node, load it via ``stdin`` and call any function with arguments. The
    resulting response is dumped over JSON so that it can get printed to
    ``stdout``, then captured locally, loaded into regular Python and returned.
    If the remote end generates an exception with a traceback, that is captured
    as well and raised accordingly.
    """

    def __init__(self, conn, module, logger=None, python_executable=None):
        self.conn = conn
        self.module = module
        # Full source text of the module; shipped to the remote python's stdin.
        self._module_source = inspect.getsource(module)
        self.logger = logger
        self.python_executable = python_executable

    def __getattr__(self, name):
        """Proxy attribute access as a remote call to ``module.name``."""
        if not hasattr(self.module, name):
            msg = 'module %s does not have attribute %s' % (str(self.module
                ), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            if docstring and self.logger:
                self.logger.debug(docstring)
            # Append a small __main__ snippet (dump_template) that calls the
            # requested function and JSON-dumps its return value/exception.
            if len(args):
                source = self._module_source + dump_template % (name, repr(
                    args))
            else:
                source = self._module_source + dump_template % (name, '()')
            if self.python_executable is None:
                self.python_executable = get_python_executable(self.conn)
            out, err, code = check(self.conn, [self.python_executable],
                stdin=source.encode('utf-8'))
            if not out:
                if not err:
                    # Fabricate a minimal traceback when nothing came back.
                    err = ['Traceback (most recent call last):',
                        '  File "<stdin>", in <module>',
                        'Exception: error calling "%s"' % name]
                if code:
                    raise Exception('Unexpected remote exception: \n%s\n%s' %
                        ('\n'.join(out), '\n'.join(err)))
                return
            response = json.loads(out[0])
            if response['exception']:
                raise Exception(response['exception'])
            return response['return']
        return wrapper

    def _get_func_doc(self, func):
        """Return the stripped docstring of *func*, or '' when absent."""
        # Bug fix: Python 2's ``func_doc`` attribute does not exist on
        # Python 3 functions, so the old lookup always returned ''.
        doc = getattr(func, '__doc__', None)
        if doc:
            return doc.strip()
        return ''
def basic_remote_logger():
    """Return a DEBUG-level logger named after the local hostname."""
    logging.basicConfig()
    remote_logger = logging.getLogger(socket.gethostname())
    remote_logger.setLevel(logging.DEBUG)
    return remote_logger
def needs_ssh(hostname, _socket=None):
    """
    Obtains remote hostname of the socket and cuts off the domain part
    of its FQDN.

    Returns False when *hostname* refers to the local machine (loopback
    aliases, the FQDN, the full hostname, or its short form), True otherwise.
    """
    # Loopback aliases never need ssh.
    if hostname.lower() in ('localhost', '127.0.0.1', '127.0.1.1'):
        return False
    sock = _socket or socket
    if hostname == sock.getfqdn():
        return False
    local_name = sock.gethostname()
    short_name = local_name.split('.')[0]
    return hostname not in (local_name, short_name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
interpreter)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send('%s(%s)' % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(
args))
else:
source = self._module_source + dump_template % (name, '()')
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable],
stdin=source.encode('utf-8'))
if not out:
if not err:
err = ['Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' %
('\n'.join(out), '\n'.join(err)))
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug(
'trying to determine remote python executable with %s' % executable
)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' %
executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter
)
return conn.interpreter
<|reserved_special_token_1|>
import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
    """
    Base class for Connection objects. Provides a generic interface to execnet
    for setting up the connection
    """
    executable = ''
    remote_import_system = 'legacy'

    def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
                 detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
        """
        :param hostname: host to connect to (may be the local host)
        :param logger: optional logger; a default one is created when omitted
        :param sudo: prefix the remote interpreter with ``sudo``
        :param threads: unused, kept for backwards compatibility
        :param eager: open the gateway immediately instead of lazily
        :param detect_sudo: probe the remote end to decide if sudo is needed
        :param use_ssh: force an ssh connection even for the local host
        :param interpreter: remote python executable; defaults to the same
            major version as the local interpreter
        :param ssh_options: extra options injected into the ssh spec string
        """
        self.sudo = sudo
        self.hostname = hostname
        self.ssh_options = ssh_options
        self.logger = logger or basic_remote_logger()
        self.remote_module = None
        self.channel = None
        self.use_ssh = use_ssh
        self.global_timeout = None  # wait for ever
        self.interpreter = interpreter or 'python%s' % sys.version_info[0]
        # Always define the attribute so that ``has_connection`` and
        # ``execute`` fail gracefully (instead of raising AttributeError)
        # when the gateway was never created: either ``eager`` is False or
        # the eager connection attempt below raised.
        self.gateway = None
        if eager:
            try:
                if detect_sudo:
                    self.sudo = self._detect_sudo()
                self.gateway = self._make_gateway(hostname)
            except OSError:
                self.logger.error(
                    "Can't communicate with remote host, possibly because "
                    "%s is not installed there" % self.interpreter
                )
                raise

    def _make_gateway(self, hostname):
        """
        Create the execnet group/gateway for ``hostname`` and disable
        py2<->py3 string coercion on the channel.
        """
        self.group = execnet.Group()
        gateway = self.group.makegateway(
            self._make_connection_string(hostname)
        )
        gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
        return gateway

    def _detect_sudo(self, _execnet=None):
        """
        ``sudo`` detection has to create a different connection to the remote
        host so that we can reliably ensure that ``getuser()`` will return the
        right information.

        After getting the user info it closes the connection and returns
        a boolean
        """
        exc = _execnet or execnet
        gw = exc.makegateway(
            self._make_connection_string(self.hostname, use_sudo=False)
        )
        channel = gw.remote_exec(
            'import getpass; channel.send(getpass.getuser())'
        )
        result = channel.receive()
        gw.exit()
        if result == 'root':
            # already root, no sudo prefix needed
            return False
        self.logger.debug('connection detected need for sudo')
        return True

    def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
        """
        Build the execnet spec string (``ssh=...`` or ``popen``) for
        ``hostname``.  ``use_sudo`` overrides ``self.sudo`` when not None;
        ``_needs_ssh`` is injectable for testing.
        """
        _needs_ssh = _needs_ssh or needs_ssh
        interpreter = self.interpreter
        if use_sudo is not None:
            if use_sudo:
                interpreter = 'sudo ' + interpreter
        elif self.sudo:
            interpreter = 'sudo ' + interpreter
        if _needs_ssh(hostname) or self.use_ssh:
            if self.ssh_options:
                return 'ssh=%s %s//python=%s' % (
                    self.ssh_options, hostname, interpreter
                )
            else:
                return 'ssh=%s//python=%s' % (hostname, interpreter)
        return 'popen//python=%s' % interpreter

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.group.terminate(timeout=1.0)
        return False  # never swallow exceptions

    def cmd(self, cmd):
        """
        In the base connection class, this method just returns the ``cmd``
        as-is. Other implementations will end up doing transformations to the
        command by prefixing it with other flags needed. See
        :class:`KubernetesConnection` for an example
        """
        return cmd

    def execute(self, function, **kw):
        """Execute ``function`` remotely over the gateway."""
        return self.gateway.remote_exec(function, **kw)

    def exit(self):
        """Terminate the execnet group (all gateways)."""
        self.group.terminate(timeout=1.0)

    def import_module(self, module, python_executable=None):
        """
        Allows remote execution of a local module. Depending on the
        ``remote_import_system`` attribute it may use execnet's implementation
        or remoto's own based on JSON.

        .. note:: It is not possible to use execnet's remote execution model on
           connections that aren't SSH or Local.
        """
        if self.remote_import_system is not None:
            if self.remote_import_system == 'json':
                self.remote_module = JsonModuleExecute(self, module, self.logger,
                                                       python_executable=python_executable)
            else:
                self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
        else:
            self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
        return self.remote_module

    def has_connection(self):
        """Return True when a gateway exists and still has a receiver."""
        if self.gateway:
            return self.gateway.hasreceiver()
        return False
class LegacyModuleExecute(object):
    """
    This (now legacy) class, is the way ``execnet`` does its remote module
    execution: it sends it over a channel, and does a send/receive for
    exchanging information. This only works when there is native support in
    execnet for a given connection. This currently means it would only work for
    ssh and local (Popen) connections, and will not work for anything like
    kubernetes or containers.
    """

    def __init__(self, gateway, module, logger=None):
        self.channel = gateway.remote_exec(module)
        self.module = module
        self.logger = logger

    def __getattr__(self, name):
        """
        Return a proxy callable for ``name`` that sends the call over the
        channel and blocks on the response.  Raises ``AttributeError`` when
        the wrapped module does not define ``name``.
        """
        if not hasattr(self.module, name):
            msg = "module %s does not have attribute %s" % (str(self.module), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            arguments = self._convert_args(args)
            if docstring and self.logger:
                self.logger.debug(docstring)
            self.channel.send("%s(%s)" % (name, arguments))
            try:
                return self.channel.receive()
            except Exception as error:
                # Error will come as a string of a traceback, remove everything
                # up to the actual exception since we do get garbage otherwise
                # that points to non-existent lines in the compiled code
                exc_line = str(error)
                for tb_line in reversed(str(error).split('\n')):
                    if tb_line:
                        exc_line = tb_line
                        break
                raise RuntimeError(exc_line)
        return wrapper

    def _get_func_doc(self, func):
        """
        Return the stripped docstring of ``func``, or '' when there is none.
        """
        # ``func_doc`` was the Python 2 spelling and no longer exists on
        # Python 3 functions (which only expose ``__doc__``), so the old
        # ``getattr(func, 'func_doc')`` always raised and this method
        # returned '' on Python 3.  Prefer ``__doc__`` and keep ``func_doc``
        # as a fallback for exotic callables.
        doc = getattr(func, '__doc__', None) or getattr(func, 'func_doc', None)
        return doc.strip() if doc else ''

    def _convert_args(self, args):
        """
        Render the ``args`` tuple as the comma-separated argument list that
        gets embedded in the ``name(args)`` string sent over the channel.
        """
        if args:
            if len(args) > 1:
                arguments = str(args).rstrip(')').lstrip('(')
            else:
                # single element: also strip the trailing comma of "(x,)"
                arguments = str(args).rstrip(',)').lstrip('(')
        else:
            arguments = ''
        return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
    """
    This remote execution class allows to ship Python code over to the remote
    node, load it via ``stdin`` and call any function with arguments. The
    resulting response is dumped over JSON so that it can get printed to
    ``stdout``, then captured locally, loaded into regular Python and returned.

    If the remote end generates an exception with a traceback, that is captured
    as well and raised accordingly.
    """

    def __init__(self, conn, module, logger=None, python_executable=None):
        self.conn = conn
        self.module = module
        # the module's source is shipped verbatim to the remote interpreter
        self._module_source = inspect.getsource(module)
        self.logger = logger
        self.python_executable = python_executable

    def __getattr__(self, name):
        """
        Return a proxy callable for ``name`` that ships the module source plus
        a call to ``name`` to the remote interpreter and decodes the JSON
        response.  Raises ``AttributeError`` when the wrapped module does not
        define ``name``.
        """
        if not hasattr(self.module, name):
            msg = "module %s does not have attribute %s" % (str(self.module), name)
            raise AttributeError(msg)
        docstring = self._get_func_doc(getattr(self.module, name))

        def wrapper(*args):
            if docstring and self.logger:
                self.logger.debug(docstring)
            if len(args):
                source = self._module_source + dump_template % (name, repr(args))
            else:
                source = self._module_source + dump_template % (name, '()')
            # check python interpreter, lazily resolved and cached
            if self.python_executable is None:
                self.python_executable = get_python_executable(self.conn)
            out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
            if not out:
                if not err:
                    # fabricate a minimal traceback so the failure is visible
                    err = [
                        'Traceback (most recent call last):',
                        ' File "<stdin>", in <module>',
                        'Exception: error calling "%s"' % name
                    ]
                if code:
                    raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
                # at this point, there was no stdout, and the exit code was 0,
                # we must return so that we don't fail trying to serialize back
                # the JSON
                return
            response = json.loads(out[0])
            if response['exception']:
                raise Exception(response['exception'])
            return response['return']
        return wrapper

    def _get_func_doc(self, func):
        """
        Return the stripped docstring of ``func``, or '' when there is none.
        """
        # ``func_doc`` was the Python 2 spelling and no longer exists on
        # Python 3 functions (which only expose ``__doc__``), so the old
        # ``getattr(func, 'func_doc')`` always raised and this method
        # returned '' on Python 3.  Prefer ``__doc__`` and keep ``func_doc``
        # as a fallback for exotic callables.
        doc = getattr(func, '__doc__', None) or getattr(func, 'func_doc', None)
        return doc.strip() if doc else ''
def basic_remote_logger():
    """
    Build a default logger for remote connections: root handlers via
    ``basicConfig`` and a DEBUG-level logger named after the local host.
    """
    logging.basicConfig()
    host_logger = logging.getLogger(socket.gethostname())
    host_logger.setLevel(logging.DEBUG)
    return host_logger
def needs_ssh(hostname, _socket=None):
    """
    Decide whether ``hostname`` requires an ssh connection: return False for
    well-known local aliases, the local FQDN, and the local (short or full)
    hostname; True otherwise.  ``_socket`` is injectable for testing.
    """
    local_aliases = ('localhost', '127.0.0.1', '127.0.1.1')
    if hostname.lower() in local_aliases:
        return False
    sock = _socket or socket
    if hostname == sock.getfqdn():
        return False
    full_name = sock.gethostname()
    short_name = full_name.split('.')[0]
    return hostname not in (full_name, short_name)
def get_python_executable(conn):
    """
    Try to determine the remote Python version so that it can be used
    when executing. Avoids the problem of different Python versions, or distros
    that do not use ``python`` but do ``python3``
    """
    # candidates in order of preference:
    for candidate in ('python3', 'python', 'python2.7'):
        conn.logger.debug('trying to determine remote python executable with %s' % candidate)
        out, err, code = check(conn, ['which', candidate])
        if code:
            conn.logger.warning('skipping %s, was not found in path' % candidate)
            continue
        try:
            return out[0].strip()
        except IndexError:
            conn.logger.warning('could not parse stdout: %s' % out)
    # if all fails, we just return whatever the main connection had
    conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
    return conn.interpreter
|
flexible
|
{
"blob_id": "ae38995d153deed2e6049b7b65fb5f28dfcef470",
"index": 1442,
"step-1": "<mask token>\n\n\nclass BaseConnection(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n <mask token>\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n <mask token>\n\n def __enter__(self):\n return self\n <mask token>\n <mask token>\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, 
args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def 
_get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseConnection(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + 
interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. 
This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. 
The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = 
self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, 
args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def 
_get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "import inspect\nimport json\nimport socket\nimport sys\nimport execnet\nimport logging\nfrom remoto.process import check\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, 
hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, 
args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\ndump_template = \"\"\"\nif __name__ == '__main__':\n import json, traceback\n obj = {'return': None, 'exception': None}\n try:\n obj['return'] = %s%s\n except Exception:\n obj['exception'] = traceback.format_exc()\n try:\n print(json.dumps(obj).decode('utf-8'))\n except AttributeError:\n print(json.dumps(obj))\n\"\"\"\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: 
error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\ndef get_python_executable(conn):\n \"\"\"\n Try to determine the remote Python version so that it can be used\n when executing. Avoids the problem of different Python versions, or distros\n that do not use ``python`` but do ``python3``\n \"\"\"\n executables = ['python3', 'python', 'python2.7']\n for executable in executables:\n conn.logger.debug(\n 'trying to determine remote python executable with %s' % executable\n )\n out, err, code = check(conn, ['which', executable])\n if code:\n conn.logger.warning('skipping %s, was not found in path' %\n executable)\n else:\n try:\n return out[0].strip()\n except IndexError:\n conn.logger.warning('could not parse stdout: %s' % out)\n conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter\n )\n return conn.interpreter\n",
"step-5": "import inspect\nimport json\nimport socket\nimport sys\nimport execnet\nimport logging\nfrom remoto.process import check\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,\n detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None # wait for ever\n\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because \"\n \"%s is not installed there\" % self.interpreter\n )\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(\n self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(\n self._make_connection_string(self.hostname, use_sudo=False)\n )\n\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())'\n )\n\n result = channel.receive()\n gw.exit()\n\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return 
True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (\n self.ssh_options, hostname, interpreter\n )\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.logger,\n python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = \"module %s does not have attribute %s\" % (str(self.module), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send(\"%s(%s)\" % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n # Error will come as a string of a traceback, remove everything\n # up to the actual exception since we do get garbage otherwise\n # that points to non-existent lines in the compiled code\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise 
RuntimeError(exc_line)\n\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\ndump_template = \"\"\"\nif __name__ == '__main__':\n import json, traceback\n obj = {'return': None, 'exception': None}\n try:\n obj['return'] = %s%s\n except Exception:\n obj['exception'] = traceback.format_exc()\n try:\n print(json.dumps(obj).decode('utf-8'))\n except AttributeError:\n print(json.dumps(obj))\n\"\"\"\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = \"module %s does not have attribute %s\" % (str(self.module), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(args))\n else:\n source = self._module_source + dump_template % (name, '()')\n\n # check python interpreter\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n\n 
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = [\n 'Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>',\n 'Exception: error calling \"%s\"' % name\n ]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' % ('\\n'.join(out), '\\n'.join(err)))\n # at this point, there was no stdout, and the exit code was 0,\n # we must return so that we don't fail trying to serialize back\n # the JSON\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\ndef get_python_executable(conn):\n \"\"\"\n Try to determine the remote Python version so that it can be used\n when executing. 
Avoids the problem of different Python versions, or distros\n that do not use ``python`` but do ``python3``\n \"\"\"\n # executables in order of preference:\n executables = ['python3', 'python', 'python2.7']\n for executable in executables:\n conn.logger.debug('trying to determine remote python executable with %s' % executable)\n out, err, code = check(conn, ['which', executable])\n if code:\n conn.logger.warning('skipping %s, was not found in path' % executable)\n else:\n try:\n return out[0].strip()\n except IndexError:\n conn.logger.warning('could not parse stdout: %s' % out)\n\n # if all fails, we just return whatever the main connection had\n conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)\n return conn.interpreter\n",
"step-ids": [
19,
23,
27,
30,
31
]
}
|
[
19,
23,
27,
30,
31
] |
import pickle
import sys
import time

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
def load_cifar_data(data_files):
    """Load and concatenate pickled CIFAR batch files.

    Parameters
    ----------
    data_files : iterable of str
        Paths to pickled CIFAR batches.  Each file unpickles (with
        ``encoding='bytes'``) to a dict holding byte keys ``b'data'``
        (2-D array of flattened image rows) and ``b'labels'`` (list of ints).

    Returns
    -------
    tuple
        ``(data, labels)`` where ``data`` stacks every batch row-wise into
        one 2-D numpy array and ``labels`` is a flat list of int labels.
        An empty ``data_files`` yields ``([], [])``.
    """
    data_chunks = []
    labels = []
    for file in data_files:
        with open(file, 'rb') as fo:
            data_dict = pickle.load(fo, encoding='bytes')
        # Collect per-file chunks and stack once at the end; calling
        # np.vstack inside the loop re-copies all prior rows each time,
        # which is quadratic in the total number of rows.
        data_chunks.append(data_dict[b'data'])
        labels.extend(data_dict[b'labels'])
    data = np.vstack(data_chunks) if data_chunks else []
    return data, labels
def unpickle(file):
    """Deserialize one pickled batch file, keeping keys as byte strings."""
    with open(file, 'rb') as handle:
        return pickle.load(handle, encoding='bytes')
def get_classwise_indices(labels):
    """Map each label value to the list of positions where it occurs.

    Parameters
    ----------
    labels : sequence
        Per-sample class labels (any hashable values).

    Returns
    -------
    dict
        ``{label: [index, ...]}`` with indices in order of appearance.
    """
    label_indices = {}
    for idx, label in enumerate(labels):
        # setdefault replaces the original `in label_indices.keys()`
        # membership test plus if/else, avoiding a double lookup per item.
        label_indices.setdefault(label, []).append(idx)
    return label_indices
def get_data_from_indices(data, indices_dict, count_per_class, image_shape):
    """Materialize (image, label) arrays from per-class index lists.

    For each class, the first ``count_per_class`` indices are used when
    ``count_per_class`` is truthy, otherwise every listed index.  Each
    selected row of ``data`` is reshaped to ``image_shape``.

    Returns a tuple ``(images, labels)`` where ``images`` is an ndarray of
    the reshaped samples and ``labels`` is an (N, 1) int32 ndarray.
    """
    images = []
    labels = []
    for label, indices in indices_dict.items():
        if count_per_class:
            # Indexing (rather than slicing) keeps the original behavior of
            # raising IndexError when a class has fewer than count_per_class.
            selected = [indices[i] for i in range(count_per_class)]
        else:
            selected = indices
        for sample_idx in selected:
            images.append(np.reshape(data[sample_idx], image_shape))
            labels.append(label)
    label_column = np.asarray(labels, dtype=np.int32).reshape(-1, 1)
    return np.asarray(images), label_column
def create_data_loader(data_x, data_y, batch_size, shuffle):
    """Build a DataLoader over paired feature/label samples.

    Each element of ``data_x`` / ``data_y`` is converted to a float tensor
    and stacked along a new batch dimension before being wrapped in a
    TensorDataset.
    """
    features = torch.stack([torch.Tensor(sample) for sample in data_x])
    targets = torch.stack([torch.Tensor(sample) for sample in data_y])
    dataset = torch.utils.data.TensorDataset(features, targets)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=batch_size,
                                       shuffle=shuffle)
def train_model(model, train_data_loader, test_data_loader, num_epochs=5, learning_rate=0.001, save_epochs=None, model_name="cnn"):
    """Train ``model`` with Adam + cross-entropy and record per-epoch metrics.

    Parameters
    ----------
    model : torch.nn.Module
        Network optimized in place.
    train_data_loader, test_data_loader : iterable
        Yield ``(images, labels)`` batches; labels are float rows that are
        reduced to class indices with ``torch.max(labels.long(), 1)[0]``.
    num_epochs : int
        Number of passes over the training data.
    learning_rate : float
        Adam step size.
    save_epochs : collection of int, optional
        1-based epoch numbers after which the full model is checkpointed to
        ``../data/models/<model_name>_<epoch>``.
    model_name : str
        Prefix for checkpoint file names.

    Returns
    -------
    tuple
        ``(train_accuracies, test_accuracies, train_losses)``, one entry per
        epoch; each loss entry is the final batch's loss of that epoch.
    """
    # Loss and optimizer (the original's no-op self-assignments of
    # num_epochs/learning_rate were removed).
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    total_step = len(train_data_loader)
    train_times = []
    train_accuracies = []
    train_losses = []
    test_accuracies = []
    for epoch in range(num_epochs):
        # Bug fix: get_accuracies() switches the model to eval() and never
        # switches back, so every epoch after the first trained in eval
        # mode (wrong behavior for dropout/batch-norm). Restore train mode.
        model.train()
        start_time = time.time()
        for i, (images, labels) in enumerate(train_data_loader):
            # Forward pass
            outputs = model(images)
            target = torch.max(labels.long(), 1)[0]
            loss = criterion(outputs, target)
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i+1) % 200 == 0:
                print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                       .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
        end_time = time.time()
        if save_epochs and epoch + 1 in save_epochs:
            torch.save(model, "../data/models/" + model_name + "_" + str(epoch+1))
        train_times.append(end_time - start_time)
        # NOTE(review): only the last batch's loss is recorded per epoch.
        train_losses.append(loss.item())
        print("Calculating train accuracy...")
        train_accuracies.append(get_accuracies(train_data_loader, model)[0])
        print("Calculating test accuracy...")
        test_accuracies.append(get_accuracies(test_data_loader, model)[0])
    print("Average training time per epoch:", np.mean(train_times))
    print("Total training time for all epochs:", np.sum(train_times))
    return train_accuracies, test_accuracies, train_losses
def get_accuracies(data_loader, model):
    """Compute classification accuracy of ``model`` over ``data_loader``.

    Labels arrive as float rows and are reduced to class indices with
    ``torch.max(labels.long(), 1)[0]``; predictions are the argmax of the
    model outputs.  Side effect: puts the model into eval() mode and prints
    the accuracy.

    Returns
    -------
    tuple
        ``(accuracy_percent, seconds_elapsed)``.
    """
    started = time.time()
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in data_loader:
            class_ids = torch.max(labels.long(), 1)[0]
            _, predicted = torch.max(model(images).data, 1)
            total += class_ids.size(0)
            correct += (predicted == class_ids).sum().item()
    accuracy = 100 * correct / total
    time_taken = time.time() - started
    print('Accuracy of the model: {} %'.format(accuracy))
    return accuracy, time_taken
def get_model_size(model, model_name):
    """Print the pickled size of ``model`` in megabytes.

    Parameters
    ----------
    model : object
        Any picklable object (e.g. a torch model).
    model_name : str
        Human-readable name used in the printed message.
    """
    # Bug fix: the original pickled an undefined global ``net`` instead of
    # the ``model`` argument and relied on ``sys`` without importing it,
    # so every call raised NameError.
    payload = pickle.dumps(model)
    byte_size = sys.getsizeof(payload)
    print('Size of ' + model_name + ' model: ', byte_size/1000000)
def imshow(img, label_names, file_name="../data/sample_images"):
    """Render a tensor image grid, label rows by class name, save and show.

    ``img`` is a CHW tensor (e.g. from torchvision.utils.make_grid); it is
    cast to uint8 and transposed to HWC before display.  The figure is
    written to ``file_name`` and then shown.
    """
    pixels = np.transpose(img.numpy().astype(np.uint8), (1, 2, 0))
    plt.clf()
    rendered = plt.imshow(pixels)
    # Spread one tick per class name evenly down the left edge of the grid.
    ylim = rendered.get_extent()[2]
    plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)
    plt.savefig(file_name)
    plt.show()
def show_classwise_images(data, labels, label_names, k):
    """Display up to ``k`` sample images per class as one image grid.

    ``labels`` is an (N, 1) array of class ids; row captions come from
    ``label_names``.  Rendering is delegated to :func:`imshow` with ``k``
    images per grid row.
    """
    samples_per_class = {}
    for idx, row in enumerate(labels):
        class_id = row[0]
        if class_id not in samples_per_class:
            samples_per_class[class_id] = [data[idx]]
        elif len(samples_per_class[class_id]) < k:
            samples_per_class[class_id].append(data[idx])
    grid_images = []
    row_names = []
    for class_id, images in samples_per_class.items():
        row_names.append(label_names[class_id])
        grid_images.extend(images)
    tensor_grid = torch.stack([torch.Tensor(image) for image in grid_images])
    imshow(torchvision.utils.make_grid(tensor_grid, nrow=k), row_names)
def outlier_analysis(model, outliers_tensor, outlier_label_names, cifar10_label_names):
    """Classify out-of-distribution images and display the predictions.

    Prints each original label next to the CIFAR-10 class the model
    predicted, then renders the images in a single-column grid captioned
    with the predicted class names via :func:`imshow`.
    """
    model.eval()
    predicted_labels = []
    with torch.no_grad():
        started = time.time()
        outputs = model(outliers_tensor)
        finished = time.time()
        print("Time taken for prediction:", str(finished - started))
        _, predictions = torch.max(outputs.data, 1)
        for idx, class_id in enumerate(predictions):
            print("Original:", outlier_label_names[idx], "Predicted:", cifar10_label_names[class_id])
            predicted_labels.append(cifar10_label_names[class_id])
    imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1), predicted_labels)
def plot_values(x, y, xlabel, ylabel, title, legend, fig_name, plot_dir="../data/plots/"):
    """Plot one line per series in *y* against the shared *x* values.

    Args:
        x: Sequence of x-axis values shared by every series.
        y: Iterable of y-value sequences; each is drawn as its own line.
        xlabel: Label for the x axis.
        ylabel: Label for the y axis.
        title: Figure title.
        legend: Legend entries, one per series in *y*.
        fig_name: File name for the saved figure.
        plot_dir: Directory prefix for the saved figure. Generalized from the
            previously hard-coded "../data/plots/"; the default preserves the
            old behavior for existing callers.
    """
    plt.clf()
    for series in y:
        plt.plot(x, series)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.legend(legend)
    plt.savefig(plot_dir + fig_name)
    plt.show()
|
normal
|
{
"blob_id": "66fe0a3b84773ee1d4f91d8fde60f1fc5b3d7e4c",
"index": 6454,
"step-1": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\n<mask token>\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = 
criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n 
image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\n<mask token>\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in 
enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), 
label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), 
lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n 
npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n plt.savefig('../data/plots/' + fig_name)\n plt.show()\n",
"step-4": "import pickle\nimport numpy as np\nimport torch\nimport time\nimport torchvision\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n\n\ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n\n\ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(\n generated_labels, dtype=np.int32), (-1, 1))\n\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x])\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=shuffle)\n return dataloader\n\n\ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5,\n learning_rate=0.001, save_epochs=None, model_name='cnn'):\n num_epochs = num_epochs\n learning_rate = 
learning_rate\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 200 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n epoch + 1, num_epochs, i + 1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, '../data/models/' + model_name + '_' + str(\n epoch + 1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item())\n print('Calculating train accuracy...')\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print('Calculating test accuracy...')\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print('Average training time per epoch:', np.mean(train_times))\n print('Total training time for all epochs:', np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, time_taken\n\n\ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name 
+ ' model: ', byte_size / 1000000)\n\n\ndef imshow(img, label_names, file_name='../data/sample_images'):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim / len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n\n\ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n\n\ndef outlier_analysis(model, outliers_tensor, outlier_label_names,\n cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print('Time taken for prediction:', str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print('Original:', outlier_label_names[idx], 'Predicted:',\n cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1),\n predicted_labels)\n\n\ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n plt.savefig('../data/plots/' + fig_name)\n plt.show()\n",
"step-5": "import pickle\nimport numpy as np\nimport torch\nimport time\nimport torchvision\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef load_cifar_data(data_files):\n data = []\n labels = []\n for file in data_files:\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n if len(data) == 0:\n data = data_dict[str.encode('data')]\n labels = data_dict[str.encode('labels')]\n else:\n data = np.vstack((data, data_dict[str.encode('data')]))\n labels.extend(data_dict[str.encode('labels')])\n return data, labels\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n res = pickle.load(fo, encoding='bytes')\n return res\n \ndef get_classwise_indices(labels):\n label_indices = {}\n for idx, label in enumerate(labels):\n if label not in label_indices.keys():\n label_indices[label] = [idx]\n else:\n label_indices[label].append(idx)\n return label_indices\n \ndef get_data_from_indices(data, indices_dict, count_per_class, image_shape):\n generated_data = []\n generated_labels = []\n for key, val in indices_dict.items():\n if count_per_class:\n for i in range(count_per_class):\n generated_data.append(np.reshape(data[val[i]], image_shape))\n generated_labels.append(key)\n else:\n for i in val:\n generated_data.append(np.reshape(data[i], image_shape))\n generated_labels.append(key)\n return np.asarray(generated_data), np.reshape(np.asarray(generated_labels, dtype=np.int32), (-1,1))\n\ndef create_data_loader(data_x, data_y, batch_size, shuffle):\n tensor_x = torch.stack([torch.Tensor(i) for i in data_x]) # transform to torch tensors\n tensor_y = torch.stack([torch.Tensor(i) for i in data_y])\n\n dataset = torch.utils.data.TensorDataset(tensor_x,tensor_y) # create datset\n dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle) # create dataloader\n return dataloader\n \ndef train_model(model, train_data_loader, test_data_loader, num_epochs=5, learning_rate=0.001, save_epochs=None, 
model_name=\"cnn\"):\n num_epochs = num_epochs\n learning_rate = learning_rate\n\n # Loss and optimizer\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n \n total_step = len(train_data_loader)\n train_times = []\n train_accuracies = []\n train_losses = []\n test_accuracies = []\n \n for epoch in range(num_epochs):\n start_time = time.time()\n for i, (images, labels) in enumerate(train_data_loader):\n # Forward pass\n outputs = model(images)\n target = torch.max(labels.long(), 1)[0]\n loss = criterion(outputs, target)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i+1) % 200 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n end_time = time.time()\n if save_epochs and epoch + 1 in save_epochs:\n torch.save(model, \"../data/models/\" + model_name + \"_\" + str(epoch+1))\n train_times.append(end_time - start_time)\n train_losses.append(loss.item()) \n print(\"Calculating train accuracy...\")\n train_accuracies.append(get_accuracies(train_data_loader, model)[0])\n print(\"Calculating test accuracy...\")\n test_accuracies.append(get_accuracies(test_data_loader, model)[0])\n print(\"Average training time per epoch:\", np.mean(train_times))\n print(\"Total training time for all epochs:\", np.sum(train_times))\n return train_accuracies, test_accuracies, train_losses\n\ndef get_accuracies(data_loader, model):\n start_time = time.time()\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images, labels in data_loader:\n labels = torch.max(labels.long(), 1)[0]\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n accuracy = 100 * correct / total\n end_time = time.time()\n time_taken = end_time - start_time\n print('Accuracy of the model: {} %'.format(accuracy))\n return accuracy, 
time_taken\n \ndef get_model_size(model, model_name):\n model = pickle.dumps(net)\n byte_size = sys.getsizeof(model)\n print('Size of ' + model_name + ' model: ', byte_size/1000000)\n \ndef imshow(img, label_names, file_name=\"../data/sample_images\"):\n npimg = img.numpy()\n npimg = npimg.astype(np.uint8)\n npimg = np.transpose(npimg, (1, 2, 0))\n plt.clf()\n im = plt.imshow(npimg)\n ylim = im.get_extent()[2]\n plt.yticks(np.arange(0, ylim + 1, ylim/len(label_names)), label_names)\n plt.savefig(file_name)\n plt.show()\n \ndef show_classwise_images(data, labels, label_names, k):\n image_dict = {}\n for idx, l in enumerate(labels):\n label = l[0]\n if label in image_dict.keys() and len(image_dict[label]) < k:\n image_dict[label].append(data[idx])\n elif label not in image_dict.keys():\n image_dict[label] = [data[idx]]\n \n images_to_show = []\n labels_to_show = []\n for label, image in image_dict.items():\n labels_to_show.append(label_names[label])\n for i in image:\n images_to_show.append(i)\n \n images_tensor = torch.stack([torch.Tensor(i) for i in images_to_show])\n \n imshow(torchvision.utils.make_grid(images_tensor, nrow=k), labels_to_show)\n \ndef outlier_analysis(model, outliers_tensor, outlier_label_names, cifar10_label_names):\n model.eval()\n predicted_labels = []\n with torch.no_grad():\n start_time = time.time()\n outputs = model(outliers_tensor)\n end_time = time.time()\n print(\"Time taken for prediction:\", str(end_time - start_time))\n _, predicted = torch.max(outputs.data, 1)\n for idx, label in enumerate(predicted):\n print(\"Original:\", outlier_label_names[idx], \"Predicted:\", cifar10_label_names[label])\n predicted_labels.append(cifar10_label_names[label])\n imshow(torchvision.utils.make_grid(outliers_tensor, nrow=1), predicted_labels)\n \ndef plot_values(x, y, xlabel, ylabel, title, legend, fig_name):\n plt.clf()\n for y_i in y:\n plt.plot(x, y_i)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.legend(legend)\n 
plt.savefig(\"../data/plots/\" + fig_name)\n plt.show()",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('scenario', '0005_auto_20200729_1149')]
operations = [migrations.RemoveField(model_name='weapon', name=
'vehicle'), migrations.DeleteModel(name='Vehicle')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('scenario', '0005_auto_20200729_1149')]
operations = [migrations.RemoveField(model_name='weapon', name=
'vehicle'), migrations.DeleteModel(name='Vehicle')]
<|reserved_special_token_1|>
# Generated by Django 3.0.8 on 2020-07-29 18:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scenario', '0005_auto_20200729_1149'),
]
operations = [
migrations.RemoveField(
model_name='weapon',
name='vehicle',
),
migrations.DeleteModel(
name='Vehicle',
),
]
|
flexible
|
{
"blob_id": "b99093fb13c59d4b9bb0a4f32fb62423d6752118",
"index": 6480,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('scenario', '0005_auto_20200729_1149')]\n operations = [migrations.RemoveField(model_name='weapon', name=\n 'vehicle'), migrations.DeleteModel(name='Vehicle')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('scenario', '0005_auto_20200729_1149')]\n operations = [migrations.RemoveField(model_name='weapon', name=\n 'vehicle'), migrations.DeleteModel(name='Vehicle')]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-07-29 18:30\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('scenario', '0005_auto_20200729_1149'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='weapon',\n name='vehicle',\n ),\n migrations.DeleteModel(\n name='Vehicle',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .BLWecc import (
curve,
setCurve,
getPublicKey,
getPrivateKey,
getAddress as getAddressByCode,
pub2add as getAddressByPublicKey,
sign,
verifyTx as verify,
)
|
normal
|
{
"blob_id": "25ee13314c7cf828b8805d9f483bd5ee12073228",
"index": 8004,
"step-1": "<mask token>\n",
"step-2": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom .BLWecc import curve, setCurve, getPublicKey, getPrivateKey, getAddress as getAddressByCode, pub2add as getAddressByPublicKey, sign, verifyTx as verify\n",
"step-3": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom .BLWecc import (\ncurve,\nsetCurve,\ngetPublicKey,\ngetPrivateKey,\ngetAddress as getAddressByCode, \npub2add as getAddressByPublicKey,\nsign,\nverifyTx as verify,\n)\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
Character class
'''
import pygame
from time import sleep
class Character:
    def __init__(self, screen, side_length, border_width, valid_points, start_point, end_point, current_position, a_colour, na_colour,\
    keys=None, k_colour=None):
        """Create a maze character and immediately draw it on screen.

        Args:
            screen: pygame display surface the character is drawn onto.
            side_length: Pixel length of one square grid cell.
            border_width: Pixel width of the border between grid cells.
            valid_points: Accepted but never stored here -- presumably the
                set of traversable cells; TODO confirm against callers.
            start_point: (col, row) starting cell of the character.
            end_point: (col, row) goal cell of the character.
            current_position: (col, row) cell the character occupies now.
            a_colour: RGB tuple drawn while the character occupies a cell.
            na_colour: RGB tuple drawn on cells the character has vacated.
            keys: Accepted but unused in this constructor --
                NOTE(review): likely for a key-collection feature; confirm.
            k_colour: Accepted but unused in this constructor -- see above.
        """
        self.screen = screen # pygame screen
        self.side_length = side_length # length of the grid unit
        self.border_width = border_width # border width of the grid unit
        self.start_point = start_point # starting point of character in maze stored as a tuple
        self.end_point = end_point # end point of character in maze (tuple)
        self.current_position = current_position # current position of character (tuple)
        self.a_colour = a_colour # active colour of the character (tuple of 3 elements) RGB colour
        self.na_colour = na_colour # inactive colour of the character (tuple of 3 elements) RGB colour
        # draw the initial position of the character
        self.draw_position()
# draw the character
def draw_position(self):
pygame.draw.rect(self.screen, self.a_colour, [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
self.border_width+(self.side_length+self.border_width)*self.current_position[1], self.side_length, self.side_length])
# move the character to next position
def move_character(self, next_position):
# create a rectangle for the current position
current_rect = [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
self.border_width+(self.side_length+self.border_width)*self.current_position[1],\
self.side_length, self.side_length]
# create a rectangle for the next position
next_rect = [self.border_width+(self.side_length+self.border_width)*next_position[0],\
self.border_width+(self.side_length+self.border_width)*next_position[1],\
self.side_length, self.side_length]
# draw the previous position of the character as an inactive block
pygame.draw.rect(self.screen, self.na_colour, current_rect)
# update the screen at the current point
pygame.display.update(current_rect)
# draw the next position of the character
pygame.draw.rect(self.screen, self.a_colour, next_rect)
# update the screen at the next point
pygame.display.update(next_rect)
# update the current position of the character to the next position
self.current_position = next_position
# draw the intermediate steps when moving a character
def move_character_smooth(self, next_position, steps):
# go right
if next_position[0] != self.current_position[0]:
# from i = 1 to steps
for i in range(1,steps+1):
# short delay between each intermediate step
sleep(0.005)
difference = (next_position[0]-self.current_position[0])*i/steps
next_pos = (self.current_position[0]+difference, self.current_position[1])
self.move_character(next_pos)
else:
for i in range(1,steps+1):
sleep(0.005)
difference = (next_position[1]-self.current_position[1])*i/steps
next_pos = (self.current_position[0], self.current_position[1]+difference)
self.move_character(next_pos)
# return the current position of the character
def get_current_position(self):
return self.current_position
# end goal flag
def reached_goal(self):
if self.current_position == self.end_point:
return True
else:
return False
|
normal
|
{
"blob_id": "f7f96b19bdc20f732566709a7801002fe49d49eb",
"index": 3214,
"step-1": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n <mask token>\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n 
next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n 
next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n\n def reached_goal(self):\n if self.current_position == self.end_point:\n return True\n else:\n return False\n",
"step-4": "<mask token>\nimport pygame\nfrom time import sleep\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - 
self.current_position[1]\n ) * i / steps\n next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n\n def reached_goal(self):\n if self.current_position == self.end_point:\n return True\n else:\n return False\n",
"step-5": "'''\nCharacter class\n'''\n\nimport pygame\nfrom time import sleep\n\nclass Character:\n\n\tdef __init__(self, screen, side_length, border_width, valid_points, start_point, end_point, current_position, a_colour, na_colour,\\\n\t\t\t\tkeys=None, k_colour=None):\n\n\t\tself.screen = screen # pygame screen\n\t\tself.side_length = side_length # length of the grid unit\n\t\tself.border_width = border_width # border width of the grid unit\n\t\tself.start_point = start_point # starting point of character in maze stored as a tuple\n\t\tself.end_point = end_point # end point of character in maze (tuple)\n\t\tself.current_position = current_position # current position of character (tuple)\n\t\tself.a_colour = a_colour # active colour of the character (tuple of 3 elements) RGB colour\n\t\tself.na_colour = na_colour # inactive colour of the character (tuple of 3 elements) RGB colour\n\t\t\n\t\t\t\n\t\t# draw the initial position of the character\n\t\tself.draw_position()\n\n\t# draw the character\n\tdef draw_position(self):\n\t\tpygame.draw.rect(self.screen, self.a_colour, [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\\\n\t\t\tself.border_width+(self.side_length+self.border_width)*self.current_position[1], self.side_length, self.side_length])\n\n\t# move the character to next position\n\tdef move_character(self, next_position):\n\t\t# create a rectangle for the current position\n\t\tcurrent_rect = [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\\\n\t\t\t\t\t\tself.border_width+(self.side_length+self.border_width)*self.current_position[1],\\\n\t\t\t\t\t\tself.side_length, self.side_length]\n\t\t# create a rectangle for the next position\n\t\tnext_rect = [self.border_width+(self.side_length+self.border_width)*next_position[0],\\\n\t\t\t\t\t self.border_width+(self.side_length+self.border_width)*next_position[1],\\\n\t\t\t\t\t self.side_length, self.side_length]\n\t\t# draw the previous position of 
the character as an inactive block\n\t\tpygame.draw.rect(self.screen, self.na_colour, current_rect)\n\t\t# update the screen at the current point\n\t\tpygame.display.update(current_rect)\n\t\t# draw the next position of the character\n\t\tpygame.draw.rect(self.screen, self.a_colour, next_rect)\n\t\t# update the screen at the next point\n\t\tpygame.display.update(next_rect)\n\t\t# update the current position of the character to the next position\n\t\tself.current_position = next_position\n\n\n\t# draw the intermediate steps when moving a character\n\tdef move_character_smooth(self, next_position, steps):\n\t\t# go right\n\t\tif next_position[0] != self.current_position[0]:\n\t\t\t# from i = 1 to steps\n\t\t\tfor i in range(1,steps+1):\n\t\t\t\t# short delay between each intermediate step\n\t\t\t\tsleep(0.005)\n\t\t\t\tdifference = (next_position[0]-self.current_position[0])*i/steps\n\t\t\t\tnext_pos = (self.current_position[0]+difference, self.current_position[1])\n\t\t\t\tself.move_character(next_pos)\n\t\telse:\n\t\t\tfor i in range(1,steps+1):\n\t\t\t\tsleep(0.005)\n\t\t\t\tdifference = (next_position[1]-self.current_position[1])*i/steps\n\t\t\t\tnext_pos = (self.current_position[0], self.current_position[1]+difference)\n\t\t\t\tself.move_character(next_pos)\n\n\t# return the current position of the character\n\tdef get_current_position(self):\n\t\treturn self.current_position\n\n\t# end goal flag\n\tdef reached_goal(self):\n\t\tif self.current_position == self.end_point:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# KEEP IN MIND IF YOU DECIDE TO USE THIS FUNCTION LATER
# IN YOUR PROJECT THAT IF YOU READ THE IMAGE WITH
# cv2.imread() INSTEAD YOU START WITH BGR COLOR!
def bin_spatial(img, color_space='RGB', size=(32, 32)):
colour_dict = { 'RGB':'RGB',
'BGR':cv2.COLOR_BGR2RGB,
'HLS':cv2.COLOR_BGR2HLS,
'HSV':cv2.COLOR_BGR2HSV,
'LUV':cv2.COLOR_BGR2LUV,
'YUV': cv2.COLOR_RGB2YUV,
'YCrCb': cv2.COLOR_RGB2YCrCb
}
# If someother Colour Space
if color_space.upper() != 'RGB':
method = colour_dict.get(color_space, 'RGB')
img = cv2.cvtColor(img, method)
else:
img = np.copy(img)
small_img = cv2.resize(img, size)
feature_vec = small_img.ravel()
# Return the feature vector
return feature_vec
if __name__ == "__main__":
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))
# Plot features
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
##
## Solution
##
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# def bin_spatial(img, color_space='RGB', size=(32, 32)):
# # Convert image to new color space (if specified)
# if color_space != 'RGB':
# if color_space == 'HSV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# elif color_space == 'LUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# elif color_space == 'HLS':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# elif color_space == 'YUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
# elif color_space == 'YCrCb':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
# else: feature_image = np.copy(img)
# # Use cv2.resize().ravel() to create the feature vector
# features = cv2.resize(feature_image, size).ravel()
# # Return the feature vector
# return features
|
normal
|
{
"blob_id": "f178ae70ce54244624c2254d0d6256b83144db33",
"index": 5085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\nif __name__ == '__main__':\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n",
"step-4": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\nif __name__ == '__main__':\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n",
"step-5": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\n# Define a function to compute color histogram features \n# Pass the color_space flag as 3-letter all caps string\n# like 'HSV' or 'LUV' etc.\n# KEEP IN MIND IF YOU DECIDE TO USE THIS FUNCTION LATER\n# IN YOUR PROJECT THAT IF YOU READ THE IMAGE WITH \n# cv2.imread() INSTEAD YOU START WITH BGR COLOR!\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = { 'RGB':'RGB',\n 'BGR':cv2.COLOR_BGR2RGB,\n 'HLS':cv2.COLOR_BGR2HLS,\n 'HSV':cv2.COLOR_BGR2HSV,\n 'LUV':cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV,\n 'YCrCb': cv2.COLOR_RGB2YCrCb\n }\n \n # If someother Colour Space\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n # Return the feature vector\n return feature_vec\n\nif __name__ == \"__main__\": \n # You can also read cutout2, 3, 4 etc. 
to see other examples\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n\n # Plot features\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n\n\n##\n## Solution\n##\n# Define a function to compute color histogram features \n# Pass the color_space flag as 3-letter all caps string\n# like 'HSV' or 'LUV' etc.\n# def bin_spatial(img, color_space='RGB', size=(32, 32)):\n# # Convert image to new color space (if specified)\n# if color_space != 'RGB':\n# if color_space == 'HSV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n# elif color_space == 'LUV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n# elif color_space == 'HLS':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n# elif color_space == 'YUV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n# elif color_space == 'YCrCb':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n# else: feature_image = np.copy(img) \n# # Use cv2.resize().ravel() to create the feature vector\n# features = cv2.resize(feature_image, size).ravel() \n# # Return the feature vector\n# return features",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LinkedList(object):
<|reserved_special_token_0|>
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
<|reserved_special_token_0|>
def __len__(self):
"""Return length of linked list."""
return self.size()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LinkedList(object):
<|reserved_special_token_0|>
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class LinkedList(object):
"""Build linked list."""
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Node(object):
"""Build a node object."""
def __init__(self, data=None, next=None):
"""Constructor for the Node object."""
self.data = data
self.next = next
class LinkedList(object):
"""Build linked list."""
def __init__(self, iterable=()):
"""Constructor for the Linked List object."""
self.head = None
self._counter = 0
if isinstance(iterable, (str, tuple, list)):
for item in iterable:
self.push(item)
def push(self, val):
"""Add a new value to the head of the Linked List."""
new_head = Node(val, self.head)
self.head = new_head
self._counter += 1
def pop(self):
"""Remove and return the value if the head of the Linked List."""
if not self.head:
raise IndexError('Empty list, unable to pop')
output = self.head.data
self.head = self.head.next
self._counter -= 1
return output
def size(self):
"""Return size of our list."""
return self._counter
def search(self, val):
"""Search linked list for requested node."""
search_through = self.head
while search_through:
if val == search_through.data:
return search_through
else:
search_through = search_through.next
return search_through
def remove(self, node):
"""Remove selected node."""
current_node = self.head
previous_node = None
found = False
if current_node is None:
raise IndexError('Nothing in the list.')
try:
while current_node and found is False:
if node == current_node.data:
found = True
else:
previous_node = current_node
current_node = current_node.next
if previous_node is None:
self.pop()
elif current_node.next is None:
previous_node.next = None
else:
previous_node.next = current_node.next
except AttributeError:
raise ValueError('No such node.')
self._counter -= 1
def display(self):
"""Display nodes in linked list."""
node = self.head
display_this = []
while node:
display_this.append(node.data)
node = node.next
return str(display_this).replace('[', '(').replace(']', ')')
def __len__(self):
"""Return length of linked list."""
return self.size()
def __str__(self):
"""Display the linked list."""
return self.display()
<|reserved_special_token_1|>
"""Create a new Node object and attach it a Linked List."""
class Node(object):
    """A single element of a singly linked list.

    Attributes:
        data: The payload stored in this node.
        next: The following Node, or None at the tail.
    """

    def __init__(self, data=None, next=None):
        """Store the payload and the reference to the next node."""
        self.data = data
        self.next = next


class LinkedList(object):
    """A singly linked list that pushes and pops at the head."""

    def __init__(self, iterable=()):
        """Build a list by pushing each item of *iterable* onto the head.

        Because items are pushed one at a time, they end up in reverse
        order relative to the input.
        """
        self.head = None
        self._counter = 0  # number of nodes currently in the list
        if isinstance(iterable, (str, tuple, list)):
            for item in iterable:
                self.push(item)

    def push(self, val):
        """Add a new value to the head of the Linked List."""
        self.head = Node(val, self.head)
        self._counter += 1

    def pop(self):
        """Remove and return the value at the head of the Linked List.

        Raises:
            IndexError: If the list is empty.
        """
        if not self.head:
            raise IndexError("Empty list, unable to pop")
        output = self.head.data
        self.head = self.head.next
        self._counter -= 1
        return output

    def size(self):
        """Return the number of items in the list."""
        return self._counter

    def search(self, val):
        """Return the first Node whose data equals *val*, or None."""
        current = self.head
        while current:
            if current.data == val:
                return current
            current = current.next
        return None

    def remove(self, node):
        """Remove the first node whose data equals *node*.

        Raises:
            IndexError: If the list is empty.
            ValueError: If no matching node is found.
        """
        if self.head is None:
            raise IndexError("Nothing in the list.")
        if self.head.data == node:
            # pop() already updates the counter; decrementing again here
            # (as the previous implementation did) corrupted size().
            self.pop()
            return
        previous = self.head
        current = self.head.next
        while current:
            if current.data == node:
                previous.next = current.next
                self._counter -= 1
                return
            previous = current
            current = current.next
        raise ValueError("No such node.")

    def display(self):
        """Return the node values as a tuple-style string, head first."""
        values = []
        node = self.head
        while node:
            values.append(node.data)
            node = node.next
        return str(values).replace("[", "(").replace("]", ")")

    def __len__(self):  # pragma: no cover
        """Return length of linked list."""
        return self.size()

    def __str__(self):  # pragma: no cover
        """Display the linked list."""
        return self.display()
|
flexible
|
{
"blob_id": "192bd3c783f6f822f8e732ddf47d7fc3b22c032b",
"index": 1618,
"step-1": "<mask token>\n\n\nclass LinkedList(object):\n <mask token>\n\n def __init__(self, iterable=()):\n \"\"\"Constructor for the Linked List object.\"\"\"\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)\n\n def push(self, val):\n \"\"\"Add a new value to the head of the Linked List.\"\"\"\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1\n <mask token>\n <mask token>\n\n def search(self, val):\n \"\"\"Search linked list for requested node.\"\"\"\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through\n\n def remove(self, node):\n \"\"\"Remove selected node.\"\"\"\n current_node = self.head\n previous_node = None\n found = False\n if current_node is None:\n raise IndexError('Nothing in the list.')\n try:\n while current_node and found is False:\n if node == current_node.data:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.next\n if previous_node is None:\n self.pop()\n elif current_node.next is None:\n previous_node.next = None\n else:\n previous_node.next = current_node.next\n except AttributeError:\n raise ValueError('No such node.')\n self._counter -= 1\n <mask token>\n\n def __len__(self):\n \"\"\"Return length of linked list.\"\"\"\n return self.size()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LinkedList(object):\n <mask token>\n\n def __init__(self, iterable=()):\n \"\"\"Constructor for the Linked List object.\"\"\"\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)\n\n def push(self, val):\n \"\"\"Add a new value to the head of the Linked List.\"\"\"\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1\n\n def pop(self):\n \"\"\"Remove and return the value if the head of the Linked List.\"\"\"\n if not self.head:\n raise IndexError('Empty list, unable to pop')\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output\n\n def size(self):\n \"\"\"Return size of our list.\"\"\"\n return self._counter\n\n def search(self, val):\n \"\"\"Search linked list for requested node.\"\"\"\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through\n\n def remove(self, node):\n \"\"\"Remove selected node.\"\"\"\n current_node = self.head\n previous_node = None\n found = False\n if current_node is None:\n raise IndexError('Nothing in the list.')\n try:\n while current_node and found is False:\n if node == current_node.data:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.next\n if previous_node is None:\n self.pop()\n elif current_node.next is None:\n previous_node.next = None\n else:\n previous_node.next = current_node.next\n except AttributeError:\n raise ValueError('No such node.')\n self._counter -= 1\n\n def display(self):\n \"\"\"Display nodes in linked list.\"\"\"\n node = self.head\n display_this = []\n while node:\n display_this.append(node.data)\n node = node.next\n return str(display_this).replace('[', '(').replace(']', ')')\n\n def __len__(self):\n \"\"\"Return length of linked list.\"\"\"\n return self.size()\n\n def 
__str__(self):\n \"\"\"Display the linked list.\"\"\"\n return self.display()\n",
"step-3": "<mask token>\n\n\nclass Node(object):\n <mask token>\n <mask token>\n\n\nclass LinkedList(object):\n \"\"\"Build linked list.\"\"\"\n\n def __init__(self, iterable=()):\n \"\"\"Constructor for the Linked List object.\"\"\"\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)\n\n def push(self, val):\n \"\"\"Add a new value to the head of the Linked List.\"\"\"\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1\n\n def pop(self):\n \"\"\"Remove and return the value if the head of the Linked List.\"\"\"\n if not self.head:\n raise IndexError('Empty list, unable to pop')\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output\n\n def size(self):\n \"\"\"Return size of our list.\"\"\"\n return self._counter\n\n def search(self, val):\n \"\"\"Search linked list for requested node.\"\"\"\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through\n\n def remove(self, node):\n \"\"\"Remove selected node.\"\"\"\n current_node = self.head\n previous_node = None\n found = False\n if current_node is None:\n raise IndexError('Nothing in the list.')\n try:\n while current_node and found is False:\n if node == current_node.data:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.next\n if previous_node is None:\n self.pop()\n elif current_node.next is None:\n previous_node.next = None\n else:\n previous_node.next = current_node.next\n except AttributeError:\n raise ValueError('No such node.')\n self._counter -= 1\n\n def display(self):\n \"\"\"Display nodes in linked list.\"\"\"\n node = self.head\n display_this = []\n while node:\n display_this.append(node.data)\n node = node.next\n return str(display_this).replace('[', '(').replace(']', ')')\n\n def __len__(self):\n \"\"\"Return 
length of linked list.\"\"\"\n return self.size()\n\n def __str__(self):\n \"\"\"Display the linked list.\"\"\"\n return self.display()\n",
"step-4": "<mask token>\n\n\nclass Node(object):\n \"\"\"Build a node object.\"\"\"\n\n def __init__(self, data=None, next=None):\n \"\"\"Constructor for the Node object.\"\"\"\n self.data = data\n self.next = next\n\n\nclass LinkedList(object):\n \"\"\"Build linked list.\"\"\"\n\n def __init__(self, iterable=()):\n \"\"\"Constructor for the Linked List object.\"\"\"\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)\n\n def push(self, val):\n \"\"\"Add a new value to the head of the Linked List.\"\"\"\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1\n\n def pop(self):\n \"\"\"Remove and return the value if the head of the Linked List.\"\"\"\n if not self.head:\n raise IndexError('Empty list, unable to pop')\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output\n\n def size(self):\n \"\"\"Return size of our list.\"\"\"\n return self._counter\n\n def search(self, val):\n \"\"\"Search linked list for requested node.\"\"\"\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through\n\n def remove(self, node):\n \"\"\"Remove selected node.\"\"\"\n current_node = self.head\n previous_node = None\n found = False\n if current_node is None:\n raise IndexError('Nothing in the list.')\n try:\n while current_node and found is False:\n if node == current_node.data:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.next\n if previous_node is None:\n self.pop()\n elif current_node.next is None:\n previous_node.next = None\n else:\n previous_node.next = current_node.next\n except AttributeError:\n raise ValueError('No such node.')\n self._counter -= 1\n\n def display(self):\n \"\"\"Display nodes in linked list.\"\"\"\n node = self.head\n display_this = []\n while node:\n 
display_this.append(node.data)\n node = node.next\n return str(display_this).replace('[', '(').replace(']', ')')\n\n def __len__(self):\n \"\"\"Return length of linked list.\"\"\"\n return self.size()\n\n def __str__(self):\n \"\"\"Display the linked list.\"\"\"\n return self.display()\n",
"step-5": "\"\"\"Create a new Node object and attach it a Linked List.\"\"\"\n\n\nclass Node(object):\n \"\"\"Build a node object.\"\"\"\n\n def __init__(self, data=None, next=None):\n \"\"\"Constructor for the Node object.\"\"\"\n self.data = data\n self.next = next\n\n\nclass LinkedList(object):\n \"\"\"Build linked list.\"\"\"\n\n def __init__(self, iterable=()):\n \"\"\"Constructor for the Linked List object.\"\"\"\n self.head = None\n self._counter = 0\n if isinstance(iterable, (str, tuple, list)):\n for item in iterable:\n self.push(item)\n\n def push(self, val):\n \"\"\"Add a new value to the head of the Linked List.\"\"\"\n new_head = Node(val, self.head)\n self.head = new_head\n self._counter += 1\n\n def pop(self):\n \"\"\"Remove and return the value if the head of the Linked List.\"\"\"\n if not self.head:\n raise IndexError(\"Empty list, unable to pop\")\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output\n\n def size(self):\n \"\"\"Return size of our list.\"\"\"\n return self._counter\n\n def search(self, val):\n \"\"\"Search linked list for requested node.\"\"\"\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through\n\n def remove(self, node):\n \"\"\"Remove selected node.\"\"\"\n current_node = self.head\n previous_node = None\n found = False\n if current_node is None:\n raise IndexError(\"Nothing in the list.\")\n try:\n while current_node and found is False:\n if node == current_node.data:\n found = True\n else:\n previous_node = current_node\n current_node = current_node.next\n if previous_node is None:\n self.pop()\n elif current_node.next is None:\n previous_node.next = None\n else:\n previous_node.next = current_node.next\n except AttributeError:\n raise ValueError(\"No such node.\")\n self._counter -= 1\n\n def display(self):\n \"\"\"Display nodes in linked list.\"\"\"\n node = 
self.head\n display_this = []\n while node:\n display_this.append(node.data)\n node = node.next\n return str(display_this).replace(\"[\", \"(\").replace(\"]\", \")\")\n\n def __len__(self): # pragma: no cover\n \"\"\"Return length of linked list.\"\"\"\n return self.size()\n\n def __str__(self): # pragma: no cover\n \"\"\"Display the linked list.\"\"\"\n return self.display()\n",
"step-ids": [
6,
10,
12,
14,
15
]
}
|
[
6,
10,
12,
14,
15
] |
from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5
# Sample instance of the generated NIST "list of NMTOKENs, minLength=5" type,
# populated with ten single-character token values.
obj = NistschemaSvIvListNmtokensMinLength5(
    value=[
        "f",
        "D",
        "T",
        "a",
        "b",
        "C",
        "o",
        "t",
        "t",
        "w",
    ]
)
|
normal
|
{
"blob_id": "3941f283893c259033d7fb3be83c8071433064ba",
"index": 7170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nobj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',\n 'C', 'o', 't', 't', 'w'])\n",
"step-3": "from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5\nobj = NistschemaSvIvListNmtokensMinLength5(value=['f', 'D', 'T', 'a', 'b',\n 'C', 'o', 't', 't', 'w'])\n",
"step-4": "from output.models.nist_data.list_pkg.nmtokens.schema_instance.nistschema_sv_iv_list_nmtokens_min_length_5_xsd.nistschema_sv_iv_list_nmtokens_min_length_5 import NistschemaSvIvListNmtokensMinLength5\n\n\nobj = NistschemaSvIvListNmtokensMinLength5(\n value=[\n \"f\",\n \"D\",\n \"T\",\n \"a\",\n \"b\",\n \"C\",\n \"o\",\n \"t\",\n \"t\",\n \"w\",\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
    def findLength(self, A: [int], B: [int]) ->int:
        """Return the length of the longest contiguous subarray common to A and B.

        Dynamic programming over a table ``dp`` where ``dp[i][j]`` is the
        length of the common subarray ending at ``A[i]`` and ``B[j]``:
        if ``A[i] == B[j]`` then ``dp[i][j] = dp[i-1][j-1] + 1``,
        otherwise ``dp[i][j] = 0``.  Time complexity: O(m*n).

        :param A: first list of integers
        :param B: second list of integers
        :return: length of the longest common contiguous subarray
        """
        na = len(A)
        nb = len(B)
        # dp has na rows and nb columns, all initialised to 0.
        dp = [[(0) for _ in range(nb)] for _ in range(na)]
        for i in range(na):
            for j in range(nb):
                if A[i] == B[j]:
                    if i >= 1 and j >= 1:
                        dp[i][j] = 1 + dp[i - 1][j - 1]
                    else:
                        # First row/column: a match starts a run of length 1.
                        dp[i][j] = 1
                else:
                    dp[i][j] = 0
        # NOTE(review): max() over an empty dp would raise for empty inputs;
        # the problem constraints guarantee non-empty arrays.
        max_length = max(max(row) for row in dp)
        return max_length
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
    def findLength(self, A: [int], B: [int]) ->int:
        """Return the length of the longest contiguous subarray common to A and B.

        Dynamic programming over a table ``dp`` where ``dp[i][j]`` is the
        length of the common subarray ending at ``A[i]`` and ``B[j]``:
        if ``A[i] == B[j]`` then ``dp[i][j] = dp[i-1][j-1] + 1``,
        otherwise ``dp[i][j] = 0``.  Time complexity: O(m*n).

        :param A: first list of integers
        :param B: second list of integers
        :return: length of the longest common contiguous subarray
        """
        na = len(A)
        nb = len(B)
        # dp has na rows and nb columns, all initialised to 0.
        dp = [[(0) for _ in range(nb)] for _ in range(na)]
        for i in range(na):
            for j in range(nb):
                if A[i] == B[j]:
                    if i >= 1 and j >= 1:
                        dp[i][j] = 1 + dp[i - 1][j - 1]
                    else:
                        # First row/column: a match starts a run of length 1.
                        dp[i][j] = 1
                else:
                    dp[i][j] = 0
        # NOTE(review): max() over an empty dp would raise for empty inputs;
        # the problem constraints guarantee non-empty arrays.
        max_length = max(max(row) for row in dp)
        return max_length
<|reserved_special_token_0|>
print(sol.findLength(la, lb))
<|reserved_special_token_1|>
class Solution:

    def findLength(self, A: [int], B: [int]) ->int:
        """Return the length of the longest contiguous subarray common to A and B.

        Dynamic programming: dp[i][j] is the length of the common subarray
        ending at A[i] and B[j].  If A[i] == B[j] then
        dp[i][j] = dp[i-1][j-1] + 1, otherwise dp[i][j] = 0.
        Time and space complexity: O(m*n).

        :param A: first list of integers
        :param B: second list of integers
        :return: length of the longest common contiguous subarray (0 if
            either input is empty)
        """
        na = len(A)
        nb = len(B)
        if na == 0 or nb == 0:
            # Guard: max() over an empty table would raise ValueError.
            return 0
        dp = [[0 for _ in range(nb)] for _ in range(na)]
        for i in range(na):
            for j in range(nb):
                if A[i] == B[j]:
                    # Extend the run ending at the previous pair,
                    # or start a new run of length 1 on the table border.
                    dp[i][j] = 1 + dp[i - 1][j - 1] if i >= 1 and j >= 1 else 1
                else:
                    dp[i][j] = 0
        return max(max(row) for row in dp)
# Smoke test: the longest common subarray of the two lists below is
# [0, 0, 0, 0], so this prints 4.
sol = Solution()
la = [0, 0, 0, 0, 1]
lb = [1, 0, 0, 0, 0]
print(sol.findLength(la, lb))
<|reserved_special_token_1|>
# leetcode 718 最长重复子数组
# 给两个整数数组 A 和 B ,返回两个数组中公共的、长度最长的子数组的长度。
#
# 示例 1:
# 输入:
# A: [1,2,3,2,1]
# B: [3,2,1,4,7]
# 输出: 3
# 解释:
# 长度最长的公共子数组是 [3, 2, 1]。
#
# 说明:
# 1 <= len(A), len(B) <= 1000
# 0 <= A[i], B[i] < 100
class Solution:
    def findLength(self, A: [int], B: [int])->int:
        """Return the length of the longest contiguous subarray common to A and B.

        Dynamic programming: dp[i][j] is the length of the common subarray
        ending at A[i] and B[j].  If A[i] == B[j] then
        dp[i][j] = dp[i-1][j-1] + 1, otherwise dp[i][j] = 0.
        Time and space complexity: O(m*n).

        :param A: first list of integers
        :param B: second list of integers
        :return: length of the longest common contiguous subarray (0 if
            either input is empty)
        """
        na = len(A)
        nb = len(B)
        if na == 0 or nb == 0:
            # Guard: max() over an empty table would raise ValueError.
            return 0
        # na rows, nb columns, all initialised to 0
        dp = [[0 for _ in range(nb)] for _ in range(na)]
        for i in range(na):
            for j in range(nb):
                if A[i] == B[j]:
                    # Extend the run ending at the previous pair,
                    # or start a new run of length 1 on the table border.
                    dp[i][j] = 1 + dp[i - 1][j - 1] if i >= 1 and j >= 1 else 1
                else:
                    dp[i][j] = 0
        return max(max(row) for row in dp)
# Smoke test: the longest common subarray of the two lists below is
# [0, 0, 0, 0], so this prints 4.
sol = Solution()
la = [0,0,0,0,1]
lb = [1,0,0,0,0]
print(sol.findLength(la, lb))
|
flexible
|
{
"blob_id": "b8219c21dc2cdd497d3de48c59c146a1fd1509ec",
"index": 6673,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\nprint(sol.findLength(la, lb))\n",
"step-4": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0, 0, 0, 0, 1]\nlb = [1, 0, 0, 0, 0]\nprint(sol.findLength(la, lb))\n",
"step-5": "# leetcode 718 最长重复子数组\n# 给两个整数数组 A 和 B ,返回两个数组中公共的、长度最长的子数组的长度。\n#\n# 示例 1:\n# 输入:\n# A: [1,2,3,2,1]\n# B: [3,2,1,4,7]\n# 输出: 3\n# 解释:\n# 长度最长的公共子数组是 [3, 2, 1]。\n#\n# 说明:\n# 1 <= len(A), len(B) <= 1000\n# 0 <= A[i], B[i] < 100\n\n\nclass Solution:\n def findLength(self, A: [int], B: [int])->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n # na行,nb列的矩阵\n dp = [[0 for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i-1][j-1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0,0,0,0,1]\nlb = [1,0,0,0,0]\nprint(sol.findLength(la, lb))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def x_shortcut(lng):
    """Map a longitude in [-180, 180] to its shortcut column index."""
    shifted = lng + 180
    return floor(shifted * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Map a latitude in [-90, 90] to its shortcut row index (0 at lat=90)."""
    shifted = 90 - lat
    return floor(shifted * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """Tell whether a bounding box spans more than two shortcut cells per axis."""
    wide = (xmax - xmin) > 2 / NR_SHORTCUTS_PER_LNG
    tall = (ymax - ymin) > 2 / NR_SHORTCUTS_PER_LAT
    return wide and tall
<|reserved_special_token_0|>
def contained(x, y, x_coords, y_coords):
    """Tell whether point (x, y) lies inside the polygon given as coordinate lists."""
    polygon = [x_coords, y_coords]
    return inside_polygon(x, y, polygon)
def unique(iterable):
    """Return the items of *iterable* with duplicates dropped, keeping first-seen order."""
    distinct = []
    for item in iterable:
        if item in distinct:
            continue
        distinct.append(item)
    return distinct
<|reserved_special_token_0|>
def get_shortcuts(x, y):
    """Return the polygon numbers stored for shortcut cell (x, y), or [] if none."""
    entries = shortcuts.get((x, y))
    return [] if entries is None else entries
<|reserved_special_token_0|>
def polys_of_one_zone():
    """Yield, per timezone, the list of polygon numbers belonging to that zone."""
    for zone_nr in range(len(timezone_names)):
        first = poly_nr2zone_id[zone_nr]
        after_last = poly_nr2zone_id[zone_nr + 1]
        yield list(range(first, after_last))
def replace_entry(iterable, entry, substitute):
    """Replace every element equal to *entry* with *substitute* in place; return the sequence."""
    for index, value in enumerate(iterable):
        if value == entry:
            iterable[index] = substitute
    return iterable
<|reserved_special_token_0|>
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Parse the timezone GeoJSON file and fill the module-level data stores.

    Populates all_tz_names, all_coords, all_lengths, all_boundaries,
    poly_zone_ids, all_holes, all_hole_lengths and polynrs_of_holes, then
    validates that the parsed amounts still fit the binary field widths.

    :param path: path to the GeoJSON input file
    :raises ValueError: if any amount exceeds what the binary format can encode
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids
    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    # Use a context manager so the input file is always closed
    # (the previous version leaked the file handle).
    with open(path) as json_file:
        tz_list = json.load(json_file).get('features')
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get('tzid')
        all_tz_names.append(tz_name)
        geometry = tz_dict.get('geometry')
        if geometry.get('type') == 'MultiPolygon':
            multipolygon = geometry.get('coordinates')
        else:
            # Wrap a plain Polygon so both cases iterate the same way.
            multipolygon = [geometry.get('coordinates')]
        for poly_with_hole in multipolygon:
            # The first ring is the outer boundary; any remaining rings are holes.
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            # Drop the repeated closing point (GeoJSON rings are closed).
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            all_lengths.append(len(x_coords))
            all_boundaries.append((max(x_coords), min(x_coords), max(
                y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))
            polygon_counter += 1
        current_zone_id += 1
    # Sanity checks: every count must fit its binary representation.
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        raise ValueError(
            'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'
            , max(all_lengths))
    # NOTE(review): assumes at least one hole exists; max() on an empty
    # list would raise here (same as the previous implementation).
    if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'
            , max(all_hole_lengths))
    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError(
            'polygon counter and entry number in all_length is different:',
            polygon_counter, nr_of_lines)
    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'
            , nr_of_lines, 'polygons')
    if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'zone id cannot be encoded as char (int8). the last id is',
            poly_zone_ids[-1])
    if 0 in all_lengths:
        raise ValueError()
    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to *path* and build poly_nr2zone_id.

    After this runs, poly_nr2zone_id[i] holds the number of the first
    polygon of zone i, with one extra trailing entry, so that zone i owns
    the polygons in range(poly_nr2zone_id[i], poly_nr2zone_id[i + 1]).

    Unused ``global`` declarations from the previous version were removed;
    the remaining names are module-level objects that are only read/mutated.

    :param path: output path for the JSON list of timezone names
    :raises ValueError: if poly_zone_ids is not in ascending order
    """
    print('updating the zone names in {} now...'.format(path))
    with open(abspath(path), 'w') as f:
        json.dump(all_tz_names, f)
    print('...Done.\n\nComputing where zones start and end...')
    poly_nr = 0
    last_zone_id = -1
    for zone_id in poly_zone_ids:
        if zone_id != last_zone_id:
            # First polygon belonging to a new zone: remember where it starts.
            poly_nr2zone_id.append(poly_nr)
        if zone_id < last_zone_id:
            # Zone ids must have been appended in ascending order.
            raise ValueError()
        last_zone_id = zone_id
        poly_nr += 1
    poly_nr2zone_id.append(poly_nr)
    print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
frequencies = []
max_val = max(*nr_of_entries_in_shortcut)
print('shortcut statistics:')
print('highest entry amount is', max_val)
while max_val >= 0:
frequencies.append(nr_of_entries_in_shortcut.count(max_val))
max_val -= 1
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max entries):')
print(frequencies)
empty_shortcuts = frequencies[0]
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print(percent(empty_shortcuts, amount_of_shortcuts),
'% of all shortcuts are empty\n')
amount_of_different_zones = []
for entry in shortcut_entries:
registered_zone_ids = []
for polygon_nr in entry:
id = poly_zone_ids[polygon_nr]
if id not in registered_zone_ids:
registered_zone_ids.append(id)
amount_of_different_zones.append(len(registered_zone_ids))
frequencies = []
max_val = max(*amount_of_different_zones)
print('highest amount of different zones in one shortcut is', max_val)
while max_val >= 1:
frequencies.append(amount_of_different_zones.count(max_val))
max_val -= 1
frequencies.append(empty_shortcuts)
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max):')
print(frequencies)
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return (y - y1) * (x2 - x1) / delta_y + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return (x - x1) * (y2 - y1) / delta_x + y1
def x_intersections(y, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if y_coords[i] <= y:
if y_coords[iplus1] > y:
intersects.append(compute_x_intersection(y, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
elif y_coords[iplus1] <= y:
intersects.append(compute_x_intersection(y, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
return intersects
def y_intersections(x, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if x_coords[i] <= x:
if x_coords[iplus1] > x:
intersects.append(compute_y_intersection(x, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
elif x_coords[iplus1] <= x:
intersects.append(compute_y_intersection(x, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
return intersects
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
shortcuts_for_line = set()
x_longs, y_longs = ints_of(line)
y_longs.append(y_longs[0])
x_longs.append(x_longs[0])
step = 1 / NR_SHORTCUTS_PER_LAT
for lat in latitudes_to_check(ymax, ymin):
intersects = sorted([int2coord(x) for x in x_intersections(
coord2int(lat), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
raise ValueError(
'an uneven number of intersections has been accounted')
for i in range(0, nr_of_intersects, 2):
possible_longitudes = []
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
if contained(coord2int(intersection_in), coord2int(lat) +
1, x_longs, y_longs):
shortcuts_for_line.add((x_shortcut(intersection_in),
y_shortcut(lat) - 1))
shortcuts_for_line.add((x_shortcut(intersection_in),
y_shortcut(lat)))
else:
possible_y_shortcut = y_shortcut(lat)
middle = intersection_in + (intersection_out -
intersection_in) / 2
if contained(coord2int(middle), coord2int(lat) + 1,
x_longs, y_longs):
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
possible_y_shortcut_min1 = possible_y_shortcut - 1
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut))
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut_min1))
else:
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut))
step = 1 / NR_SHORTCUTS_PER_LAT
for lng in longitudes_to_check(xmax, xmin):
intersects = sorted([int2coord(y) for y in y_intersections(
coord2int(lng), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
raise ValueError(
'an uneven number of intersections has been accounted')
possible_latitudes = []
for i in range(0, nr_of_intersects, 2):
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
if contained(coord2int(lng) - 1, coord2int(
intersection_in), x_longs, y_longs):
shortcuts_for_line.add((x_shortcut(lng) - 1,
y_shortcut(intersection_in)))
shortcuts_for_line.add((x_shortcut(lng), y_shortcut(
intersection_in)))
else:
possible_x_shortcut = x_shortcut(lng)
middle = intersection_in + (intersection_out -
intersection_in) / 2
if contained(coord2int(lng) - 1, coord2int(middle),
x_longs, y_longs):
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
possible_latitudes.append(intersection_out)
possible_x_shortcut_min1 = possible_x_shortcut - 1
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut,
y_shortcut(possible_latitude)))
shortcuts_for_line.add((
possible_x_shortcut_min1, y_shortcut(
possible_latitude)))
else:
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
possible_latitudes.append(intersection_out)
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut,
y_shortcut(possible_latitude)))
return shortcuts_for_line
def construct_shortcuts():
print('building shortucts...')
print('currently at polygon nr:')
line = 0
for xmax, xmin, ymax, ymin in all_boundaries:
if line % 100 == 0:
print(line)
column_nrs = included_shortcut_column_nrs(xmax, xmin)
row_nrs = included_shortcut_row_nrs(ymax, ymin)
if big_zone(xmax, xmin, ymax, ymin):
shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,
ymax, ymin, line)
min_x_shortcut = column_nrs[0]
max_x_shortcut = column_nrs[-1]
min_y_shortcut = row_nrs[0]
max_y_shortcut = row_nrs[-1]
shortcuts_to_remove = []
for x, y in shortcuts_for_line:
if (x < min_x_shortcut or x > max_x_shortcut or y <
min_y_shortcut or y > max_y_shortcut):
shortcuts_to_remove.append((x, y))
for s in shortcuts_to_remove:
shortcuts_for_line.remove(s)
if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
raise ValueError(
'there are more shortcuts than before now. there is something wrong with the algorithm!'
)
if len(shortcuts_for_line) < 3:
raise ValueError(
'algorithm not valid! less than 3 zones detected (should be at least 3)'
)
else:
shortcuts_for_line = []
for column_nr in column_nrs:
for row_nr in row_nrs:
shortcuts_for_line.append((column_nr, row_nr))
for shortcut in shortcuts_for_line:
shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
line += 1
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
nr_of_floats = 2 * sum(all_lengths)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
id_freq = [polygon_ids.count(id) for id in polygon_ids]
zipped = list(zip(poly_nrs, polygon_ids, id_freq))
sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])
return [x[0] for x in sort]
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
shortcut_entries.append(sort_poly_shortcut(
shortcuts_this_entry))
amount_filled_shortcuts += 1
nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
except KeyError:
nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *
NR_SHORTCUTS_PER_LAT):
print(amount_of_shortcuts)
raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:',
amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /
amount_of_shortcuts * 100, 2), '% of all shortcuts)')
shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *
NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))
for nr in nr_of_entries_in_shortcut:
shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):',
nr_of_floats)
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
output_file.write(pack(b'<H', zone_id))
output_file.close()
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),
coord2int(ymax), coord2int(ymin)))
output_file.close()
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
addresses.append(output_file.tell())
if all_lengths[i] != len(x_coords):
raise ValueError('x_coords do not have the expected length!',
all_lengths[i], len(x_coords))
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
i += 1
output_file.close()
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
output_file.write(pack(b'<I', adr))
output_file.close()
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
output_file.write(pack(b'<I', length))
output_file.close()
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr > 300:
raise ValueError('There are too many polygons in this shortcut:',
nr)
output_file.write(pack(b'<H', nr))
output_file.close()
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr == 0:
output_file.write(pack(b'<I', 0))
else:
output_file.write(pack(b'<I', adr))
adr += 2 * nr
output_file.close()
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
for entry in entries:
if entry > nr_of_lines:
raise ValueError(entry)
output_file.write(pack(b'<H', entry))
output_file.close()
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
raise ValueError(
'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'
)
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
unique_id = poly_zone_ids[shortcuts_this_entry[0]]
for nr in shortcuts_this_entry:
if poly_zone_ids[nr] != unique_id:
unique_id = INVALID_ZONE_ID
break
output_file.write(pack(b'<H', unique_id))
except KeyError:
output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()
hole_space = 0
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
if line > nr_of_lines:
raise ValueError(line, nr_of_lines)
output_file.write(pack(b'<H', line))
i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
raise ValueError('There are more related lines than holes.')
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<I', adr))
adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space),
'% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space),
'% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def x_shortcut(lng):
    """Map a longitude in [-180, 180] to its shortcut column index."""
    shifted = lng + 180
    return floor(shifted * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Map a latitude in [-90, 90] to its shortcut row index (rows grow southwards)."""
    flipped = 90 - lat
    return floor(flipped * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """Return True when the bounding box spans more than two shortcut cells in
    both the longitude and the latitude direction."""
    wide_enough = (xmax - xmin) > 2 / NR_SHORTCUTS_PER_LNG
    tall_enough = (ymax - ymin) > 2 / NR_SHORTCUTS_PER_LAT
    return wide_enough and tall_enough
def percent(numerator, denominator):
    """Return numerator/denominator expressed as a percentage, rounded to 2 decimals."""
    ratio = numerator / denominator
    return round(ratio * 100, 2)
<|reserved_special_token_0|>
def ints_of(line=0):
    """Return polygon *line*'s coordinates converted to the int32 representation.

    Returns a pair (x_ints, y_ints) of lists.
    """
    x_coords, y_coords = all_coords[line]
    x_ints = [coord2int(coord) for coord in x_coords]
    y_ints = [coord2int(coord) for coord in y_coords]
    return x_ints, y_ints
def contained(x, y, x_coords, y_coords):
    """Return whether the point (x, y) lies inside the polygon given by the
    two coordinate lists (delegates to inside_polygon)."""
    polygon = [x_coords, y_coords]
    return inside_polygon(x, y, polygon)
def unique(iterable):
    """Return a list of the distinct items of *iterable*, keeping first-seen order.

    Uses list membership (not a set) so items do not need to be hashable.
    """
    seen = []
    for item in iterable:
        if item not in seen:
            seen.append(item)
    return seen
<|reserved_special_token_0|>
def get_shortcuts(x, y):
    """Return the list of polygon numbers stored for shortcut cell (x, y),
    or an empty list when the cell has no entry."""
    entries = shortcuts.get((x, y))
    return [] if entries is None else entries
<|reserved_special_token_0|>
def polys_of_one_zone():
    """Yield, for every timezone, the list of polygon numbers belonging to it.

    Relies on poly_nr2zone_id holding the first polygon number of each zone
    plus a trailing sentinel (one past the last polygon).
    """
    for zone_nr in range(len(timezone_names)):
        first_poly = poly_nr2zone_id[zone_nr]
        after_last_poly = poly_nr2zone_id[zone_nr + 1]
        yield list(range(first_poly, after_last_poly))
def replace_entry(iterable, entry, substitute):
    """Replace every occurrence of *entry* in *iterable* in place and return it."""
    for idx, value in enumerate(iterable):
        if value == entry:
            iterable[idx] = substitute
    return iterable
<|reserved_special_token_0|>
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Parse the timezone boundary GeoJSON at *path* into the module-level lists.

    Fills all_tz_names, all_coords, all_lengths, all_boundaries, poly_zone_ids,
    all_holes, all_hole_lengths and polynrs_of_holes, then validates that all
    amounts fit into the fixed-width binary formats written later.

    :param path: path to the GeoJSON input file
    :raises ValueError: when a count exceeds what the binary formats can encode
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids
    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    # BUGFIX: use a context manager so the file handle is closed deterministically
    # (previously `open(path).read()` leaked the handle)
    with open(path) as json_file:
        tz_list = json.load(json_file).get('features')
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get('tzid')
        all_tz_names.append(tz_name)
        geometry = tz_dict.get('geometry')
        # normalize to a list of polygons ("MultiPolygon" vs. single "Polygon")
        if geometry.get('type') == 'MultiPolygon':
            multipolygon = geometry.get('coordinates')
        else:
            multipolygon = [geometry.get('coordinates')]
        for poly_with_hole in multipolygon:
            # the first ring is the outer boundary; drop the repeated closing point
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            all_lengths.append(len(x_coords))
            all_boundaries.append((max(x_coords), min(x_coords),
                                   max(y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)
            # every remaining ring is a hole of this polygon
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))
            polygon_counter += 1
        current_zone_id += 1
    # sanity checks: the counts must fit the fixed-width binary formats
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        raise ValueError(
            'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'
            , max(all_lengths))
    # BUGFIX: guard against max() on an empty sequence when no holes were found
    # (possible e.g. with DEBUG subsets of the data)
    if all_hole_lengths and max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'
            , max(all_hole_lengths))
    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError(
            'polygon counter and entry number in all_length is different:',
            polygon_counter, nr_of_lines)
    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'
            , nr_of_lines, 'polygons')
    if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'zone id cannot be encoded as char (int8). the last id is',
            poly_zone_ids[-1])
    if 0 in all_lengths:
        raise ValueError()
    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to *path* (as JSON) and fill
    poly_nr2zone_id with the first polygon number of every zone plus a
    trailing sentinel (one past the last polygon).

    :raises ValueError: when poly_zone_ids is not monotonically non-decreasing
    """
    global poly_zone_ids
    global list_of_pointers
    global all_boundaries
    global all_coords
    global all_lengths
    global polynrs_of_holes
    print('updating the zone names in {} now...'.format(path))
    with open(abspath(path), 'w') as f:
        f.write(json.dumps(all_tz_names))
    print('...Done.\n\nComputing where zones start and end...')
    poly_nr = 0
    previous_id = -1
    for zone_id in poly_zone_ids:
        # a change of zone id marks the first polygon of the next zone
        if zone_id != previous_id:
            poly_nr2zone_id.append(poly_nr)
        if zone_id < previous_id:
            raise ValueError()
        previous_id = zone_id
        poly_nr += 1
    # sentinel: one past the last polygon
    poly_nr2zone_id.append(poly_nr)
    print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
    # Print a histogram of how many polygons each shortcut cell holds,
    # then a histogram of how many *distinct zones* each cell holds.
    frequencies = []
    max_val = max(*nr_of_entries_in_shortcut)
    print('shortcut statistics:')
    print('highest entry amount is', max_val)
    # count occurrences from max_val down to 0, then reverse -> index == entry amount
    while max_val >= 0:
        frequencies.append(nr_of_entries_in_shortcut.count(max_val))
        max_val -= 1
    frequencies.reverse()
    print('frequencies of entry amounts (from 0 to max entries):')
    print(frequencies)
    empty_shortcuts = frequencies[0]
    print('relative accumulated frequencies [%]:')
    acc = accumulated_frequency(frequencies)
    print(acc)
    print([round(100 - x, 2) for x in acc])
    print(percent(empty_shortcuts, amount_of_shortcuts),
          '% of all shortcuts are empty\n')
    # second histogram: number of different zone ids per (non-empty) shortcut
    amount_of_different_zones = []
    for entry in shortcut_entries:
        registered_zone_ids = []
        for polygon_nr in entry:
            id = poly_zone_ids[polygon_nr]
            if id not in registered_zone_ids:
                registered_zone_ids.append(id)
        amount_of_different_zones.append(len(registered_zone_ids))
    frequencies = []
    max_val = max(*amount_of_different_zones)
    print('highest amount of different zones in one shortcut is', max_val)
    while max_val >= 1:
        frequencies.append(amount_of_different_zones.count(max_val))
        max_val -= 1
    # empty shortcuts contain 0 zones; reuse the count from the first histogram
    frequencies.append(empty_shortcuts)
    frequencies.reverse()
    print('frequencies of entry amounts (from 0 to max):')
    print(frequencies)
    print('relative accumulated frequencies [%]:')
    acc = accumulated_frequency(frequencies)
    print(acc)
    print([round(100 - x, 2) for x in acc])
    print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
    """Row indices of all shortcut cells covered by the latitude range
    (rows grow southwards, so max_lat maps to the first row)."""
    first_row = y_shortcut(max_lat)
    last_row = y_shortcut(min_lat)
    return list(range(first_row, last_row + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
    """Column indices of all shortcut cells covered by the longitude range."""
    first_col = x_shortcut(min_lng)
    last_col = x_shortcut(max_lng)
    return list(range(first_col, last_col + 1))
def longitudes_to_check(max_lng, min_lng):
    """Return the longitudes of every shortcut column boundary lying strictly
    inside (min_lng, max_lng], i.e. each grid line crossed by the range."""
    step = 1 / NR_SHORTCUTS_PER_LNG
    # snap the range ends inwards onto the shortcut grid
    current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
    end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
    boundaries = []
    while current < end:
        boundaries.append(current)
        current += step
    boundaries.append(end)
    return boundaries
def latitudes_to_check(max_lat, min_lat):
    """Return the latitudes of every shortcut row boundary lying strictly
    inside (min_lat, max_lat], i.e. each grid line crossed by the range."""
    step = 1 / NR_SHORTCUTS_PER_LAT
    # snap the range ends inwards onto the shortcut grid
    current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
    end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
    boundaries = []
    while current < end:
        boundaries.append(current)
        current += step
    boundaries.append(end)
    return boundaries
def compute_x_intersection(y, x1, x2, y1, y2):
    """Return the x coordinate where the horizontal line at height *y*
    crosses the segment (x1, y1)-(x2, y2).

    For a horizontal segment (y1 == y2) x1 is returned.
    """
    dy = y2 - y1
    if dy == 0:
        return x1
    return (y - y1) * (x2 - x1) / dy + x1
def compute_y_intersection(x, x1, x2, y1, y2):
    """Return the y coordinate where the vertical line at *x* crosses the
    segment (x1, y1)-(x2, y2).

    For a vertical segment (x1 == x2) the segment's own y1 is returned.
    """
    delta_x = x2 - x1
    if delta_x == 0:
        # BUGFIX: this degenerate case previously returned x1 — an x coordinate
        # from a function that must yield a y value. The branch is unreachable
        # from y_intersections() (which never selects vertical segments), but
        # return the correct coordinate for any direct caller.
        return y1
    return (x - x1) * (y2 - y1) / delta_x + y1
def x_intersections(y, x_coords, y_coords):
    """Collect the x coordinates where the polygon's edges cross the
    horizontal line at *y*.

    An edge counts exactly when one endpoint lies at or below *y* and the
    other strictly above it (half-open rule, so shared vertices are not
    counted twice).
    """
    intersects = []
    for i in range(len(y_coords) - 1):
        j = i + 1
        starts_below = y_coords[i] <= y
        ends_below = y_coords[j] <= y
        if starts_below != ends_below:
            intersects.append(compute_x_intersection(
                y, x_coords[i], x_coords[j], y_coords[i], y_coords[j]))
    return intersects
def y_intersections(x, x_coords, y_coords):
    """Collect the y coordinates where the polygon's edges cross the
    vertical line at *x*.

    An edge counts exactly when one endpoint lies at or left of *x* and the
    other strictly right of it (half-open rule, so shared vertices are not
    counted twice).
    """
    intersects = []
    for i in range(len(y_coords) - 1):
        j = i + 1
        starts_left = x_coords[i] <= x
        ends_left = x_coords[j] <= x
        if starts_left != ends_left:
            intersects.append(compute_y_intersection(
                x, x_coords[i], x_coords[j], y_coords[i], y_coords[j]))
    return intersects
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
    # Compute the exact set of shortcut cells (x, y) covered by polygon *line*
    # by walking every shortcut grid boundary crossed by its bounding box and
    # testing the intersection intervals of the polygon's edges against it.
    shortcuts_for_line = set()
    x_longs, y_longs = ints_of(line)
    # close the polygon ring so the last edge is tested as well
    y_longs.append(y_longs[0])
    x_longs.append(x_longs[0])
    step = 1 / NR_SHORTCUTS_PER_LAT
    # --- horizontal grid boundaries ---
    for lat in latitudes_to_check(ymax, ymin):
        intersects = sorted([int2coord(x) for x in x_intersections(
            coord2int(lat), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            raise ValueError(
                'an uneven number of intersections has been accounted')
        # intersections come in (entry, exit) pairs of inside-intervals
        for i in range(0, nr_of_intersects, 2):
            possible_longitudes = []
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                # the polygon only touches the boundary in a single point;
                # register the cells above and below if it is really inside
                if contained(coord2int(intersection_in), coord2int(lat) +
                             1, x_longs, y_longs):
                    shortcuts_for_line.add((x_shortcut(intersection_in),
                                            y_shortcut(lat) - 1))
                    shortcuts_for_line.add((x_shortcut(intersection_in),
                                            y_shortcut(lat)))
            else:
                possible_y_shortcut = y_shortcut(lat)
                # sample the interval at its midpoint to decide which side
                # of the boundary the polygon interior lies on
                middle = intersection_in + (intersection_out -
                                            intersection_in) / 2
                if contained(coord2int(middle), coord2int(lat) + 1,
                             x_longs, y_longs):
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    possible_y_shortcut_min1 = possible_y_shortcut - 1
                    # interior extends above the boundary: register both rows
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut))
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut_min1))
                else:
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut))
    # --- vertical grid boundaries ---
    # NOTE(review): the step uses NR_SHORTCUTS_PER_LAT here as well — possibly
    # NR_SHORTCUTS_PER_LNG was intended for the longitude pass; confirm.
    step = 1 / NR_SHORTCUTS_PER_LAT
    for lng in longitudes_to_check(xmax, xmin):
        intersects = sorted([int2coord(y) for y in y_intersections(
            coord2int(lng), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            raise ValueError(
                'an uneven number of intersections has been accounted')
        # NOTE(review): unlike possible_longitudes above, this list is reset
        # only once per boundary (not per interval pair) — confirm intended.
        possible_latitudes = []
        for i in range(0, nr_of_intersects, 2):
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                if contained(coord2int(lng) - 1, coord2int(
                        intersection_in), x_longs, y_longs):
                    shortcuts_for_line.add((x_shortcut(lng) - 1,
                                            y_shortcut(intersection_in)))
                    shortcuts_for_line.add((x_shortcut(lng), y_shortcut(
                        intersection_in)))
            else:
                possible_x_shortcut = x_shortcut(lng)
                middle = intersection_in + (intersection_out -
                                            intersection_in) / 2
                if contained(coord2int(lng) - 1, coord2int(middle),
                             x_longs, y_longs):
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    possible_latitudes.append(intersection_out)
                    possible_x_shortcut_min1 = possible_x_shortcut - 1
                    # interior extends left of the boundary: register both columns
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut,
                                                y_shortcut(possible_latitude)))
                        shortcuts_for_line.add((
                            possible_x_shortcut_min1, y_shortcut(
                                possible_latitude)))
                else:
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    possible_latitudes.append(intersection_out)
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut,
                                                y_shortcut(possible_latitude)))
    return shortcuts_for_line
def construct_shortcuts():
    # For every polygon, decide which shortcut cells it must be registered in
    # and append its polygon number to the global `shortcuts` mapping.
    print('building shortucts...')
    print('currently at polygon nr:')
    line = 0
    for xmax, xmin, ymax, ymin in all_boundaries:
        if line % 100 == 0:
            print(line)
        column_nrs = included_shortcut_column_nrs(xmax, xmin)
        row_nrs = included_shortcut_row_nrs(ymax, ymin)
        if big_zone(xmax, xmin, ymax, ymin):
            # large polygons: compute the exact covered cells instead of
            # blindly registering the whole bounding box
            shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,
                                                         ymax, ymin, line)
            min_x_shortcut = column_nrs[0]
            max_x_shortcut = column_nrs[-1]
            min_y_shortcut = row_nrs[0]
            max_y_shortcut = row_nrs[-1]
            shortcuts_to_remove = []
            # drop any cells outside the bounding box (numerical artifacts)
            for x, y in shortcuts_for_line:
                if (x < min_x_shortcut or x > max_x_shortcut or y <
                        min_y_shortcut or y > max_y_shortcut):
                    shortcuts_to_remove.append((x, y))
            for s in shortcuts_to_remove:
                shortcuts_for_line.remove(s)
            # sanity checks on the exact computation
            if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
                raise ValueError(
                    'there are more shortcuts than before now. there is something wrong with the algorithm!'
                )
            if len(shortcuts_for_line) < 3:
                raise ValueError(
                    'algorithm not valid! less than 3 zones detected (should be at least 3)'
                )
        else:
            # small polygons: simply register every cell of the bounding box
            shortcuts_for_line = []
            for column_nr in column_nrs:
                for row_nr in row_nrs:
                    shortcuts_for_line.append((column_nr, row_nr))
        for shortcut in shortcuts_for_line:
            shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
        line += 1
# --- build the shortcut tables and write all binary output files ---
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
nr_of_floats = 2 * sum(all_lengths)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0

def sort_poly_shortcut(poly_nrs):
    # Sort a shortcut cell's polygon numbers so that polygons of rarely
    # occurring zones come first (ascending zone-id frequency, then zone id).
    polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
    id_freq = [polygon_ids.count(id) for id in polygon_ids]
    zipped = list(zip(poly_nrs, polygon_ids, id_freq))
    # two stable sorts: by zone id first, then by frequency
    sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])
    return [x[0] for x in sort]

# flatten the shortcut mapping into per-cell entry lists (row-major grid order)
for x in range(360 * NR_SHORTCUTS_PER_LNG):
    for y in range(180 * NR_SHORTCUTS_PER_LAT):
        try:
            shortcuts_this_entry = shortcuts[x, y]
            shortcut_entries.append(sort_poly_shortcut(
                shortcuts_this_entry))
            amount_filled_shortcuts += 1
            nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
        except KeyError:
            nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *
        NR_SHORTCUTS_PER_LAT):
    print(amount_of_shortcuts)
    raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:',
      amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /
                                           amount_of_shortcuts * 100, 2), '% of all shortcuts)')
shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *
                  NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))
for nr in nr_of_entries_in_shortcut:
    shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):',
      nr_of_floats)

# zone id of each polygon-range boundary (little-endian uint16 each)
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
    output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')

# zone id of every polygon (uint16 each)
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
    output_file.write(pack(b'<H', zone_id))
output_file.close()

# bounding boxes of every polygon (4 x int32)
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
    output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),
                           coord2int(ymax), coord2int(ymin)))
output_file.close()

# polygon coordinates (all x values then all y values, int32 each),
# remembering each polygon's byte offset for the address table
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
    addresses.append(output_file.tell())
    if all_lengths[i] != len(x_coords):
        raise ValueError('x_coords do not have the expected length!',
                         all_lengths[i], len(x_coords))
    for x in x_coords:
        output_file.write(pack(b'<i', coord2int(x)))
    for y in y_coords:
        output_file.write(pack(b'<i', coord2int(y)))
    i += 1
output_file.close()

# byte offsets of the polygons in poly_data.bin (uint32 each)
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
    output_file.write(pack(b'<I', adr))
output_file.close()

# coordinate count of every polygon (uint32 each)
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
    output_file.write(pack(b'<I', length))
output_file.close()

# number of polygon entries per shortcut cell (uint16 each)
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
    if nr > 300:
        raise ValueError('There are too many polygons in this shortcut:',
                         nr)
    output_file.write(pack(b'<H', nr))
output_file.close()

# byte offsets of each cell's entries in shortcuts_data.bin (uint32 each);
# empty cells get address 0 and consume no data space
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
    if nr == 0:
        output_file.write(pack(b'<I', 0))
    else:
        output_file.write(pack(b'<I', adr))
        adr += 2 * nr
output_file.close()

# the polygon numbers themselves, cell after cell (uint16 each)
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
    for entry in entries:
        if entry > nr_of_lines:
            raise ValueError(entry)
        output_file.write(pack(b'<H', entry))
output_file.close()

# per cell: the zone id if all its polygons share one, else INVALID_ZONE_ID
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
    raise ValueError(
        'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'
    )
for x in range(360 * NR_SHORTCUTS_PER_LNG):
    for y in range(180 * NR_SHORTCUTS_PER_LAT):
        try:
            shortcuts_this_entry = shortcuts[x, y]
            unique_id = poly_zone_ids[shortcuts_this_entry[0]]
            for nr in shortcuts_this_entry:
                if poly_zone_ids[nr] != unique_id:
                    unique_id = INVALID_ZONE_ID
                    break
            output_file.write(pack(b'<H', unique_id))
        except KeyError:
            output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()

# --- hole data files ---
hole_space = 0
# polygon number each hole belongs to (uint16 each)
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
    if line > nr_of_lines:
        raise ValueError(line, nr_of_lines)
    output_file.write(pack(b'<H', line))
    i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
    raise ValueError('There are more related lines than holes.')

# coordinate count of every hole (uint16 each)
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
    output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()

# byte offsets of the holes in hole_data.bin (uint32 each)
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
    output_file.write(pack(b'<I', adr))
    adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()

# hole coordinates (all x values then all y values, int32 each)
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
    for x in x_coords:
        output_file.write(pack(b'<i', coord2int(x)))
    for y in y_coords:
        output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()

# final size statistics
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space),
      '% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space),
      '% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def x_shortcut(lng):
return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /
NR_SHORTCUTS_PER_LAT)
def percent(numerator, denominator):
return round(numerator / denominator * 100, 2)
<|reserved_special_token_0|>
def ints_of(line=0):
x_coords, y_coords = all_coords[line]
return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]
def contained(x, y, x_coords, y_coords):
return inside_polygon(x, y, [x_coords, y_coords])
def unique(iterable):
out = []
for i in iterable:
if i not in out:
out.append(i)
return out
def point_between(p1, p2):
    """Return the midpoint of the segment from *p1* to *p2* as an (x, y) tuple."""
    half_dx = (p2[0] - p1[0]) / 2
    half_dy = (p2[1] - p1[1]) / 2
    return p1[0] + half_dx, p1[1] + half_dy
def get_shortcuts(x, y):
result = shortcuts.get((x, y))
if result is None:
return []
else:
return result
<|reserved_special_token_0|>
def polys_of_one_zone():
for i in range(len(timezone_names)):
start = poly_nr2zone_id[i]
end = poly_nr2zone_id[i + 1]
yield list(range(start, end))
def replace_entry(iterable, entry, substitute):
for i in range(len(iterable)):
if iterable[i] == entry:
iterable[i] = substitute
return iterable
def _holes_in_poly(poly_nr):
    """Yield the coordinate tuples of every hole registered for polygon *poly_nr*.

    polynrs_of_holes and all_holes are parallel lists, so the position of a
    matching polygon number is also the index of its hole data.
    """
    for hole_nr, registered_poly_nr in enumerate(polynrs_of_holes):
        if registered_poly_nr == poly_nr:
            yield all_holes[hole_nr]
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
global amount_of_holes
global nr_of_lines
global poly_zone_ids
print('Parsing data from {}\nthis could take a while...\n'.format(path))
tz_list = json.loads(open(path).read()).get('features')
polygon_counter = 0
current_zone_id = 0
print('holes found at: (poly_nr zone_name)')
for tz_dict in tz_list:
if DEBUG and polygon_counter > DEBUG_POLY_STOP:
break
tz_name = tz_dict.get('properties').get('tzid')
all_tz_names.append(tz_name)
geometry = tz_dict.get('geometry')
if geometry.get('type') == 'MultiPolygon':
multipolygon = geometry.get('coordinates')
else:
multipolygon = [geometry.get('coordinates')]
for poly_with_hole in multipolygon:
x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
x_coords = list(x_coords)
y_coords = list(y_coords)
x_coords.pop(-1)
y_coords.pop(-1)
all_coords.append((x_coords, y_coords))
all_lengths.append(len(x_coords))
all_boundaries.append((max(x_coords), min(x_coords), max(
y_coords), min(y_coords)))
poly_zone_ids.append(current_zone_id)
for hole in poly_with_hole:
print(polygon_counter, tz_name)
amount_of_holes += 1
polynrs_of_holes.append(polygon_counter)
x_coords, y_coords = list(zip(*hole))
x_coords = list(x_coords)
y_coords = list(y_coords)
x_coords.pop(-1)
y_coords.pop(-1)
all_holes.append((x_coords, y_coords))
all_hole_lengths.append(len(x_coords))
polygon_counter += 1
current_zone_id += 1
if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
raise ValueError(
'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'
, max(all_lengths))
if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
raise ValueError(
'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'
, max(all_hole_lengths))
nr_of_lines = len(all_lengths)
if polygon_counter != nr_of_lines:
raise ValueError(
'polygon counter and entry number in all_length is different:',
polygon_counter, nr_of_lines)
if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
raise ValueError(
'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'
, nr_of_lines, 'polygons')
if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
raise ValueError(
'zone id cannot be encoded as char (int8). the last id is',
poly_zone_ids[-1])
if 0 in all_lengths:
raise ValueError()
print('... parsing done.')
print('maximal amount of coordinates in one polygon:', max(all_lengths))
print('amount_of_holes:', amount_of_holes)
print('amount of polygons:', nr_of_lines)
print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
global poly_zone_ids
global list_of_pointers
global all_boundaries
global all_coords
global all_lengths
global polynrs_of_holes
print('updating the zone names in {} now...'.format(path))
with open(abspath(path), 'w') as f:
f.write(json.dumps(all_tz_names))
print('...Done.\n\nComputing where zones start and end...')
i = 0
last_id = -1
for zone_id in poly_zone_ids:
if zone_id != last_id:
poly_nr2zone_id.append(i)
if zone_id < last_id:
raise ValueError()
last_id = zone_id
i += 1
poly_nr2zone_id.append(i)
print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
frequencies = []
max_val = max(*nr_of_entries_in_shortcut)
print('shortcut statistics:')
print('highest entry amount is', max_val)
while max_val >= 0:
frequencies.append(nr_of_entries_in_shortcut.count(max_val))
max_val -= 1
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max entries):')
print(frequencies)
empty_shortcuts = frequencies[0]
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print(percent(empty_shortcuts, amount_of_shortcuts),
'% of all shortcuts are empty\n')
amount_of_different_zones = []
for entry in shortcut_entries:
registered_zone_ids = []
for polygon_nr in entry:
id = poly_zone_ids[polygon_nr]
if id not in registered_zone_ids:
registered_zone_ids.append(id)
amount_of_different_zones.append(len(registered_zone_ids))
frequencies = []
max_val = max(*amount_of_different_zones)
print('highest amount of different zones in one shortcut is', max_val)
while max_val >= 1:
frequencies.append(amount_of_different_zones.count(max_val))
max_val -= 1
frequencies.append(empty_shortcuts)
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max):')
print(frequencies)
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return (y - y1) * (x2 - x1) / delta_y + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return (x - x1) * (y2 - y1) / delta_x + y1
def x_intersections(y, x_coords, y_coords):
    """x coordinates of all crossings of the polyline (x_coords, y_coords)
    with the horizontal line at height *y*.

    A segment contributes iff exactly one of its endpoints lies at or below
    y -- equivalent to the original two-branch crossing test.
    """
    crossings = []
    for idx, (y_start, y_end) in enumerate(zip(y_coords, y_coords[1:])):
        if (y_start <= y) != (y_end <= y):
            crossings.append(compute_x_intersection(
                y, x_coords[idx], x_coords[idx + 1], y_start, y_end))
    return crossings
def y_intersections(x, x_coords, y_coords):
    """y coordinates of all crossings of the polyline (x_coords, y_coords)
    with the vertical line at *x*.

    A segment contributes iff exactly one of its endpoints lies at or left
    of x -- equivalent to the original two-branch crossing test.
    """
    crossings = []
    for idx, (x_start, x_end) in enumerate(zip(x_coords, x_coords[1:])):
        if (x_start <= x) != (x_end <= x):
            crossings.append(compute_y_intersection(
                x, x_start, x_end, y_coords[idx], y_coords[idx + 1]))
    return crossings
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
    """Determine exactly which shortcut grid cells polygon nr. *line* covers.

    In contrast to simply taking all cells of the bounding box, the polygon
    outline is intersected with every shortcut border latitude and longitude
    inside the box and the resulting in/out intervals are sampled.

    :param xmax: bounding box boundaries of the polygon (degrees)
    :param xmin: see xmax
    :param ymax: see xmax
    :param ymin: see xmax
    :param line: polygon number (index into the module-level coordinate lists)
    :return: set of (x shortcut, y shortcut) cell index tuples
    :raises ValueError: if a border line crosses the outline an uneven
        number of times (broken polygon data)
    """
    shortcuts_for_line = set()
    # integer (fixed point) outline coordinates of this polygon
    x_longs, y_longs = ints_of(line)
    # close the ring: append the first point so the last edge is tested too
    y_longs.append(y_longs[0])
    x_longs.append(x_longs[0])
    # NOTE(review): this step is used below to sample *longitudes*, but is
    # sized in latitude grid units; harmless (oversampling only) as long as
    # NR_SHORTCUTS_PER_LAT >= NR_SHORTCUTS_PER_LNG -- confirm in settings
    step = 1 / NR_SHORTCUTS_PER_LAT
    for lat in latitudes_to_check(ymax, ymin):
        # x coordinates where this border latitude crosses the outline
        intersects = sorted([int2coord(x) for x in x_intersections(
            coord2int(lat), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            # a closed ring must be crossed an even number of times
            raise ValueError(
                'an uneven number of intersections has been accounted')
        # consecutive pairs of intersections delimit "inside" intervals
        for i in range(0, nr_of_intersects, 2):
            possible_longitudes = []
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                # degenerate interval: the border line only touches the
                # outline here; sample just above the border (+1 int unit)
                # to check on which side the polygon interior lies
                if contained(coord2int(intersection_in), coord2int(lat) +
                    1, x_longs, y_longs):
                    # register the cells directly above and below the border
                    shortcuts_for_line.add((x_shortcut(intersection_in),
                        y_shortcut(lat) - 1))
                    shortcuts_for_line.add((x_shortcut(intersection_in),
                        y_shortcut(lat)))
            else:
                possible_y_shortcut = y_shortcut(lat)
                # sample point in the middle of the inside interval
                middle = intersection_in + (intersection_out -
                    intersection_in) / 2
                if contained(coord2int(middle), coord2int(lat) + 1,
                    x_longs, y_longs):
                    # interior extends above the border: cells of both
                    # adjacent rows are covered along the whole interval
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    possible_y_shortcut_min1 = possible_y_shortcut - 1
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut))
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut_min1))
                else:
                    # interior lies only below the border line
                    while intersection_in < intersection_out:
                        possible_longitudes.append(intersection_in)
                        intersection_in += step
                    possible_longitudes.append(intersection_out)
                    for possible_x_coord in possible_longitudes:
                        shortcuts_for_line.add((x_shortcut(
                            possible_x_coord), possible_y_shortcut))
    # same procedure for every shortcut border longitude inside the box
    step = 1 / NR_SHORTCUTS_PER_LAT
    for lng in longitudes_to_check(xmax, xmin):
        # y coordinates where this border longitude crosses the outline
        intersects = sorted([int2coord(y) for y in y_intersections(
            coord2int(lng), x_longs, y_longs)])
        nr_of_intersects = len(intersects)
        if nr_of_intersects % 2 != 0:
            raise ValueError(
                'an uneven number of intersections has been accounted')
        # NOTE(review): unlike possible_longitudes above, this list is NOT
        # reset per interval pair -- earlier latitudes are re-added for every
        # later pair (redundant but harmless, results go into a set); confirm
        possible_latitudes = []
        for i in range(0, nr_of_intersects, 2):
            iplus = i + 1
            intersection_in = intersects[i]
            intersection_out = intersects[iplus]
            if intersection_in == intersection_out:
                # border only touches the outline; sample just left of the
                # border (-1 int unit) to check where the interior lies
                if contained(coord2int(lng) - 1, coord2int(
                    intersection_in), x_longs, y_longs):
                    shortcuts_for_line.add((x_shortcut(lng) - 1,
                        y_shortcut(intersection_in)))
                    shortcuts_for_line.add((x_shortcut(lng), y_shortcut(
                        intersection_in)))
            else:
                possible_x_shortcut = x_shortcut(lng)
                middle = intersection_in + (intersection_out -
                    intersection_in) / 2
                if contained(coord2int(lng) - 1, coord2int(middle),
                    x_longs, y_longs):
                    # interior extends left of the border: cells of both
                    # adjacent columns are covered along the interval
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    possible_latitudes.append(intersection_out)
                    possible_x_shortcut_min1 = possible_x_shortcut - 1
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut,
                            y_shortcut(possible_latitude)))
                        shortcuts_for_line.add((
                            possible_x_shortcut_min1, y_shortcut(
                            possible_latitude)))
                else:
                    while intersection_in < intersection_out:
                        possible_latitudes.append(intersection_in)
                        intersection_in += step
                    possible_latitudes.append(intersection_out)
                    for possible_latitude in possible_latitudes:
                        shortcuts_for_line.add((possible_x_shortcut,
                            y_shortcut(possible_latitude)))
    return shortcuts_for_line
def construct_shortcuts():
    """Fill the module-level shortcuts dict with {cell: [polygon numbers]}.

    Small polygons simply occupy every cell of their bounding box; for
    polygons classified as big_zone() the exactly covered cells are computed
    via compute_exact_shortcuts() and sanity-checked.
    """
    print('building shortucts...')
    print('currently at polygon nr:')
    line = 0
    for xmax, xmin, ymax, ymin in all_boundaries:
        # progress output every 100 polygons
        if line % 100 == 0:
            print(line)
        # all grid columns/rows touched by the bounding box
        column_nrs = included_shortcut_column_nrs(xmax, xmin)
        row_nrs = included_shortcut_row_nrs(ymax, ymin)
        if big_zone(xmax, xmin, ymax, ymin):
            shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,
                ymax, ymin, line)
            min_x_shortcut = column_nrs[0]
            max_x_shortcut = column_nrs[-1]
            min_y_shortcut = row_nrs[0]
            max_y_shortcut = row_nrs[-1]
            # drop cells outside the bounding box (can appear through the
            # sampling in compute_exact_shortcuts)
            shortcuts_to_remove = []
            for x, y in shortcuts_for_line:
                if (x < min_x_shortcut or x > max_x_shortcut or y <
                    min_y_shortcut or y > max_y_shortcut):
                    shortcuts_to_remove.append((x, y))
            for s in shortcuts_to_remove:
                shortcuts_for_line.remove(s)
            # the exact cells must be a subset of the bounding box cells
            if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
                raise ValueError(
                    'there are more shortcuts than before now. there is something wrong with the algorithm!'
                    )
            # NOTE(review): the message says 'zones' but the check counts
            # covered grid cells; a big zone should span at least 3 cells
            if len(shortcuts_for_line) < 3:
                raise ValueError(
                    'algorithm not valid! less than 3 zones detected (should be at least 3)'
                    )
        else:
            # small polygon: use every cell of the bounding box directly
            shortcuts_for_line = []
            for column_nr in column_nrs:
                for row_nr in row_nrs:
                    shortcuts_for_line.append((column_nr, row_nr))
        # register this polygon number under every covered cell
        for shortcut in shortcuts_for_line:
            shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
        line += 1
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
nr_of_floats = 2 * sum(all_lengths)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
    """Order polygon numbers so polygons of rarer zones (within this
    shortcut) come first; among equally frequent zones, lower zone ids come
    first. Equal keys keep their original relative order.
    """
    zone_ids = [poly_zone_ids[nr] for nr in poly_nrs]
    occurrences = {zid: zone_ids.count(zid) for zid in zone_ids}
    # one stable sort on (frequency, zone id) replicates the original
    # double stable sort (first by zone id, then by frequency)
    return sorted(poly_nrs, key=lambda nr: (
        occurrences[poly_zone_ids[nr]], poly_zone_ids[nr]))
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
shortcut_entries.append(sort_poly_shortcut(
shortcuts_this_entry))
amount_filled_shortcuts += 1
nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
except KeyError:
nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *
NR_SHORTCUTS_PER_LAT):
print(amount_of_shortcuts)
raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:',
amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /
amount_of_shortcuts * 100, 2), '% of all shortcuts)')
shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *
NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))
for nr in nr_of_entries_in_shortcut:
shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):',
nr_of_floats)
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
output_file.write(pack(b'<H', zone_id))
output_file.close()
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),
coord2int(ymax), coord2int(ymin)))
output_file.close()
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
addresses.append(output_file.tell())
if all_lengths[i] != len(x_coords):
raise ValueError('x_coords do not have the expected length!',
all_lengths[i], len(x_coords))
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
i += 1
output_file.close()
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
output_file.write(pack(b'<I', adr))
output_file.close()
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
output_file.write(pack(b'<I', length))
output_file.close()
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr > 300:
raise ValueError('There are too many polygons in this shortcut:',
nr)
output_file.write(pack(b'<H', nr))
output_file.close()
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr == 0:
output_file.write(pack(b'<I', 0))
else:
output_file.write(pack(b'<I', adr))
adr += 2 * nr
output_file.close()
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
for entry in entries:
if entry > nr_of_lines:
raise ValueError(entry)
output_file.write(pack(b'<H', entry))
output_file.close()
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
raise ValueError(
'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'
)
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
unique_id = poly_zone_ids[shortcuts_this_entry[0]]
for nr in shortcuts_this_entry:
if poly_zone_ids[nr] != unique_id:
unique_id = INVALID_ZONE_ID
break
output_file.write(pack(b'<H', unique_id))
except KeyError:
output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()
hole_space = 0
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
if line > nr_of_lines:
raise ValueError(line, nr_of_lines)
output_file.write(pack(b'<H', line))
i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
raise ValueError('There are more related lines than holes.')
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<I', adr))
adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space),
'% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space),
'% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def x_shortcut(lng):
    """Shortcut grid column index for longitude *lng* (grid origin at -180 deg)."""
    shifted = lng + 180
    return floor(shifted * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Shortcut grid row index for latitude *lat* (row 0 starts at +90 deg)."""
    shifted = 90 - lat
    return floor(shifted * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """True iff the bounding box spans more than 2 shortcut cells in both
    the longitude and the latitude direction.
    """
    wide = xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG
    tall = ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT
    return wide and tall
def percent(numerator, denominator):
    """Express numerator/denominator as a percentage rounded to 2 decimals."""
    ratio = numerator / denominator
    return round(ratio * 100, 2)
def accumulated_frequency(int_list):
    """Running (cumulative) sums of *int_list* expressed as percentages of
    the grand total, each rounded to 2 decimals (last entry is 100.0).
    """
    total = sum(int_list)
    running = 0
    percentages = []
    for count in int_list:
        running += count
        # inlined percent(): round(x / total * 100, 2)
        percentages.append(round(running / total * 100, 2))
    return percentages
def ints_of(line=0):
    """Fixed-point integer x and y coordinate lists of polygon nr. *line*
    (taken from the module-level all_coords).
    """
    xs, ys = all_coords[line]
    return [coord2int(v) for v in xs], [coord2int(v) for v in ys]
def contained(x, y, x_coords, y_coords):
    """True iff point (x, y) lies inside the polygon given as separate
    coordinate lists (thin wrapper around inside_polygon).
    """
    coordinates = [x_coords, y_coords]
    return inside_polygon(x, y, coordinates)
def unique(iterable):
    """First occurrence of every element, original order preserved.

    Uses a linear membership test on purpose, so elements do not need to be
    hashable.
    """
    seen = []
    for element in iterable:
        if element not in seen:
            seen.append(element)
    return seen
def point_between(p1, p2):
    """Midpoint of the segment from p1 to p2 as an (x, y) tuple."""
    mid_x = p1[0] + (p2[0] - p1[0]) / 2
    mid_y = p1[1] + (p2[1] - p1[1]) / 2
    return mid_x, mid_y
def get_shortcuts(x, y):
    """Return the list of polygon numbers registered for shortcut cell
    (x, y), or an empty list when the cell has no entry.

    :param x: shortcut grid column index
    :param y: shortcut grid row index
    :return: list of polygon numbers (possibly empty)
    """
    # dict.get with a default collapses the explicit None check of the
    # original implementation into a single idiomatic lookup (same result).
    return shortcuts.get((x, y), [])
<|reserved_special_token_0|>
def not_empty(iterable):
    """True iff *iterable* yields at least one item (consumes at most one)."""
    for _ in iterable:
        return True
    return False
def polys_of_one_zone():
    """Yield, per timezone, the list of polygon numbers belonging to it
    (consecutive ranges read from the poly_nr2zone_id table).
    """
    for zone_nr in range(len(timezone_names)):
        first = poly_nr2zone_id[zone_nr]
        after_last = poly_nr2zone_id[zone_nr + 1]
        yield list(range(first, after_last))
def replace_entry(iterable, entry, substitute):
    """Replace every occurrence of *entry* in the (indexable) *iterable*
    with *substitute*, in place, and return the iterable.
    """
    for index, value in enumerate(iterable):
        if value == entry:
            iterable[index] = substitute
    return iterable
def _holes_in_poly(poly_nr):
    """Yield the coordinate lists of every hole registered for polygon
    *poly_nr* (parallel module-level lists polynrs_of_holes / all_holes).
    """
    for hole_idx, owner_poly in enumerate(polynrs_of_holes):
        if owner_poly == poly_nr:
            yield all_holes[hole_idx]
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Parse the timezone polygons (GeoJSON) and fill the module-level lists.

    Populates all_tz_names, all_coords, all_lengths, all_boundaries and
    poly_zone_ids as well as the hole bookkeeping lists, then validates that
    every amount/id fits the binary data type it is written as later.

    :param path: path of the combined.json from timezone-boundary-builder
    :raises ValueError: when any amount/id does not fit its binary data type
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids
    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    # NOTE(review): the file handle is never closed explicitly
    tz_list = json.loads(open(path).read()).get('features')
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get('tzid')
        all_tz_names.append(tz_name)
        geometry = tz_dict.get('geometry')
        if geometry.get('type') == 'MultiPolygon':
            multipolygon = geometry.get('coordinates')
        else:
            # plain Polygon: wrap it so both cases iterate the same way
            multipolygon = [geometry.get('coordinates')]
        for poly_with_hole in multipolygon:
            # the first ring is the outer boundary; remaining rings are holes
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            # drop the closing point (rings repeat their first coordinate)
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            all_lengths.append(len(x_coords))
            # bounding box stored as (xmax, xmin, ymax, ymin)
            all_boundaries.append((max(x_coords), min(x_coords), max(
                y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))
            polygon_counter += 1
        current_zone_id += 1
    # validation: every amount/id must fit the binary type it is written as
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        raise ValueError(
            'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'
            , max(all_lengths))
    if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'
            , max(all_hole_lengths))
    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError(
            'polygon counter and entry number in all_length is different:',
            polygon_counter, nr_of_lines)
    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'
            , nr_of_lines, 'polygons')
    # NOTE(review): message says int8 but the tested limit is the int16 one,
    # and the comparison is > (not >=) unlike the checks above -- confirm
    if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
        raise ValueError(
            'zone id cannot be encoded as char (int8). the last id is',
            poly_zone_ids[-1])
    # a polygon without any coordinates would break the binary format
    if 0 in all_lengths:
        raise ValueError()
    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to *path* (as JSON) and compute
    poly_nr2zone_id: for every zone the number of its first polygon, plus a
    final sentinel entry equal to the total polygon count.

    :param path: output file for the timezone name list
    :raises ValueError: if poly_zone_ids is not sorted ascending
    """
    # NOTE(review): these globals are only read or mutated in place, never
    # rebound -- the declarations look superfluous; confirm before removing
    global poly_zone_ids
    global list_of_pointers
    global all_boundaries
    global all_coords
    global all_lengths
    global polynrs_of_holes
    print('updating the zone names in {} now...'.format(path))
    with open(abspath(path), 'w') as f:
        f.write(json.dumps(all_tz_names))
    print('...Done.\n\nComputing where zones start and end...')
    i = 0
    last_id = -1
    for zone_id in poly_zone_ids:
        if zone_id != last_id:
            # a new zone starts at polygon nr. i
            poly_nr2zone_id.append(i)
            if zone_id < last_id:
                # ids must be ascending, otherwise zones are not contiguous
                raise ValueError()
            last_id = zone_id
        i += 1
    # sentinel: end of the last zone (= total amount of polygons)
    poly_nr2zone_id.append(i)
    print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
def print_shortcut_statistics():
frequencies = []
max_val = max(*nr_of_entries_in_shortcut)
print('shortcut statistics:')
print('highest entry amount is', max_val)
while max_val >= 0:
frequencies.append(nr_of_entries_in_shortcut.count(max_val))
max_val -= 1
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max entries):')
print(frequencies)
empty_shortcuts = frequencies[0]
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print(percent(empty_shortcuts, amount_of_shortcuts),
'% of all shortcuts are empty\n')
amount_of_different_zones = []
for entry in shortcut_entries:
registered_zone_ids = []
for polygon_nr in entry:
id = poly_zone_ids[polygon_nr]
if id not in registered_zone_ids:
registered_zone_ids.append(id)
amount_of_different_zones.append(len(registered_zone_ids))
frequencies = []
max_val = max(*amount_of_different_zones)
print('highest amount of different zones in one shortcut is', max_val)
while max_val >= 1:
frequencies.append(amount_of_different_zones.count(max_val))
max_val -= 1
frequencies.append(empty_shortcuts)
frequencies.reverse()
print('frequencies of entry amounts (from 0 to max):')
print(frequencies)
print('relative accumulated frequencies [%]:')
acc = accumulated_frequency(frequencies)
print(acc)
print([round(100 - x, 2) for x in acc])
print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return (y - y1) * (x2 - x1) / delta_y + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return (x - x1) * (y2 - y1) / delta_x + y1
def x_intersections(y, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if y_coords[i] <= y:
if y_coords[iplus1] > y:
intersects.append(compute_x_intersection(y, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
elif y_coords[iplus1] <= y:
intersects.append(compute_x_intersection(y, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
return intersects
def y_intersections(x, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if x_coords[i] <= x:
if x_coords[iplus1] > x:
intersects.append(compute_y_intersection(x, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
elif x_coords[iplus1] <= x:
intersects.append(compute_y_intersection(x, x_coords[i],
x_coords[iplus1], y_coords[i], y_coords[iplus1]))
return intersects
def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
shortcuts_for_line = set()
x_longs, y_longs = ints_of(line)
y_longs.append(y_longs[0])
x_longs.append(x_longs[0])
step = 1 / NR_SHORTCUTS_PER_LAT
for lat in latitudes_to_check(ymax, ymin):
intersects = sorted([int2coord(x) for x in x_intersections(
coord2int(lat), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
raise ValueError(
'an uneven number of intersections has been accounted')
for i in range(0, nr_of_intersects, 2):
possible_longitudes = []
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
if contained(coord2int(intersection_in), coord2int(lat) +
1, x_longs, y_longs):
shortcuts_for_line.add((x_shortcut(intersection_in),
y_shortcut(lat) - 1))
shortcuts_for_line.add((x_shortcut(intersection_in),
y_shortcut(lat)))
else:
possible_y_shortcut = y_shortcut(lat)
middle = intersection_in + (intersection_out -
intersection_in) / 2
if contained(coord2int(middle), coord2int(lat) + 1,
x_longs, y_longs):
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
possible_y_shortcut_min1 = possible_y_shortcut - 1
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut))
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut_min1))
else:
while intersection_in < intersection_out:
possible_longitudes.append(intersection_in)
intersection_in += step
possible_longitudes.append(intersection_out)
for possible_x_coord in possible_longitudes:
shortcuts_for_line.add((x_shortcut(
possible_x_coord), possible_y_shortcut))
step = 1 / NR_SHORTCUTS_PER_LAT
for lng in longitudes_to_check(xmax, xmin):
intersects = sorted([int2coord(y) for y in y_intersections(
coord2int(lng), x_longs, y_longs)])
nr_of_intersects = len(intersects)
if nr_of_intersects % 2 != 0:
raise ValueError(
'an uneven number of intersections has been accounted')
possible_latitudes = []
for i in range(0, nr_of_intersects, 2):
iplus = i + 1
intersection_in = intersects[i]
intersection_out = intersects[iplus]
if intersection_in == intersection_out:
if contained(coord2int(lng) - 1, coord2int(
intersection_in), x_longs, y_longs):
shortcuts_for_line.add((x_shortcut(lng) - 1,
y_shortcut(intersection_in)))
shortcuts_for_line.add((x_shortcut(lng), y_shortcut(
intersection_in)))
else:
possible_x_shortcut = x_shortcut(lng)
middle = intersection_in + (intersection_out -
intersection_in) / 2
if contained(coord2int(lng) - 1, coord2int(middle),
x_longs, y_longs):
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
possible_latitudes.append(intersection_out)
possible_x_shortcut_min1 = possible_x_shortcut - 1
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut,
y_shortcut(possible_latitude)))
shortcuts_for_line.add((
possible_x_shortcut_min1, y_shortcut(
possible_latitude)))
else:
while intersection_in < intersection_out:
possible_latitudes.append(intersection_in)
intersection_in += step
possible_latitudes.append(intersection_out)
for possible_latitude in possible_latitudes:
shortcuts_for_line.add((possible_x_shortcut,
y_shortcut(possible_latitude)))
return shortcuts_for_line
def construct_shortcuts():
print('building shortucts...')
print('currently at polygon nr:')
line = 0
for xmax, xmin, ymax, ymin in all_boundaries:
if line % 100 == 0:
print(line)
column_nrs = included_shortcut_column_nrs(xmax, xmin)
row_nrs = included_shortcut_row_nrs(ymax, ymin)
if big_zone(xmax, xmin, ymax, ymin):
shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,
ymax, ymin, line)
min_x_shortcut = column_nrs[0]
max_x_shortcut = column_nrs[-1]
min_y_shortcut = row_nrs[0]
max_y_shortcut = row_nrs[-1]
shortcuts_to_remove = []
for x, y in shortcuts_for_line:
if (x < min_x_shortcut or x > max_x_shortcut or y <
min_y_shortcut or y > max_y_shortcut):
shortcuts_to_remove.append((x, y))
for s in shortcuts_to_remove:
shortcuts_for_line.remove(s)
if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
raise ValueError(
'there are more shortcuts than before now. there is something wrong with the algorithm!'
)
if len(shortcuts_for_line) < 3:
raise ValueError(
'algorithm not valid! less than 3 zones detected (should be at least 3)'
)
else:
shortcuts_for_line = []
for column_nr in column_nrs:
for row_nr in row_nrs:
shortcuts_for_line.append((column_nr, row_nr))
for shortcut in shortcuts_for_line:
shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
line += 1
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
nr_of_floats = 2 * sum(all_lengths)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
id_freq = [polygon_ids.count(id) for id in polygon_ids]
zipped = list(zip(poly_nrs, polygon_ids, id_freq))
sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])
return [x[0] for x in sort]
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
shortcut_entries.append(sort_poly_shortcut(
shortcuts_this_entry))
amount_filled_shortcuts += 1
nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
except KeyError:
nr_of_entries_in_shortcut.append(0)
amount_of_shortcuts = len(nr_of_entries_in_shortcut)
print_shortcut_statistics()
if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *
NR_SHORTCUTS_PER_LAT):
print(amount_of_shortcuts)
raise ValueError('this number of shortcut zones is wrong')
print('The number of filled shortcut zones are:',
amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /
amount_of_shortcuts * 100, 2), '% of all shortcuts)')
shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *
NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))
for nr in nr_of_entries_in_shortcut:
shortcut_space += NR_BYTES_H * nr
print('The number of polygons is:', nr_of_lines)
print('The number of floats in all the polygons is (2 per point):',
nr_of_floats)
path = 'poly_nr2zone_id.bin'
print('writing file', path)
output_file = open(path, 'wb')
for zone_id in poly_nr2zone_id:
output_file.write(pack(b'<H', zone_id))
output_file.close()
print('Done\n')
path = 'poly_zone_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for zone_id in poly_zone_ids:
output_file.write(pack(b'<H', zone_id))
output_file.close()
path = 'poly_max_values.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for xmax, xmin, ymax, ymin in all_boundaries:
output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),
coord2int(ymax), coord2int(ymin)))
output_file.close()
path = 'poly_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
addresses = []
i = 0
for x_coords, y_coords in all_coords:
addresses.append(output_file.tell())
if all_lengths[i] != len(x_coords):
raise ValueError('x_coords do not have the expected length!',
all_lengths[i], len(x_coords))
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
i += 1
output_file.close()
path = 'poly_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for adr in addresses:
output_file.write(pack(b'<I', adr))
output_file.close()
path = 'poly_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_lengths:
output_file.write(pack(b'<I', length))
output_file.close()
path = 'shortcuts_entry_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr > 300:
raise ValueError('There are too many polygons in this shortcut:',
nr)
output_file.write(pack(b'<H', nr))
output_file.close()
adr = 0
path = 'shortcuts_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for nr in nr_of_entries_in_shortcut:
if nr == 0:
output_file.write(pack(b'<I', 0))
else:
output_file.write(pack(b'<I', adr))
adr += 2 * nr
output_file.close()
path = 'shortcuts_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for entries in shortcut_entries:
for entry in entries:
if entry > nr_of_lines:
raise ValueError(entry)
output_file.write(pack(b'<H', entry))
output_file.close()
path = 'shortcuts_unique_id.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
if poly_zone_ids[-1] >= INVALID_ZONE_ID:
raise ValueError(
'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'
)
for x in range(360 * NR_SHORTCUTS_PER_LNG):
for y in range(180 * NR_SHORTCUTS_PER_LAT):
try:
shortcuts_this_entry = shortcuts[x, y]
unique_id = poly_zone_ids[shortcuts_this_entry[0]]
for nr in shortcuts_this_entry:
if poly_zone_ids[nr] != unique_id:
unique_id = INVALID_ZONE_ID
break
output_file.write(pack(b'<H', unique_id))
except KeyError:
output_file.write(pack(b'<H', INVALID_ZONE_ID))
output_file.close()
hole_space = 0
path = 'hole_poly_ids.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
i = 0
for line in polynrs_of_holes:
if line > nr_of_lines:
raise ValueError(line, nr_of_lines)
output_file.write(pack(b'<H', line))
i += 1
hole_space += output_file.tell()
output_file.close()
if i > amount_of_holes:
raise ValueError('There are more related lines than holes.')
path = 'hole_coord_amount.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<H', length))
hole_space += output_file.tell()
output_file.close()
adr = 0
path = 'hole_adr2data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for length in all_hole_lengths:
output_file.write(pack(b'<I', adr))
adr += 2 * NR_BYTES_I * length
hole_space += output_file.tell()
output_file.close()
path = 'hole_data.bin'
print('writing file "', path, '"')
output_file = open(path, 'wb')
for x_coords, y_coords in all_holes:
for x in x_coords:
output_file.write(pack(b'<i', coord2int(x)))
for y in y_coords:
output_file.write(pack(b'<i', coord2int(y)))
hole_space += output_file.tell()
output_file.close()
polygon_space = nr_of_floats * NR_BYTES_I
total_space = polygon_space + hole_space + shortcut_space
print('the polygon data makes up', percent(polygon_space, total_space),
'% of the data')
print('the shortcuts make up', percent(shortcut_space, total_space),
'% of the data')
print('holes make up', percent(hole_space, total_space), '% of the data')
print('Success!')
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
import json
from datetime import datetime
from math import ceil, floor
from os.path import abspath, join, pardir
from struct import pack
from .global_settings import (
DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
)
# # # keep in mind: the faster numba optimized helper fct. cannot be used here,
# # # because numpy classes are not being used at this stage yet!
from .helpers import coord2int, inside_polygon, int2coord
# from helpers import coord2int, inside_polygon, int2coord
# from global_settings import (
# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
# )
# import sys
# from os.path import dirname
#
# sys.path.insert(0, dirname(__file__))
# from helpers import coord2int, int2coord, inside_polygon
"""
TODO write tests
USE INSTRUCTIONS:
- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases
- unzip and place the combined.json inside this timezonefinder folder
- run this file_converter.py as a script until the compilation of the binary files is completed.
IMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster
and it takes a lot less space, without losing too much accuracy (min accuracy (= at the equator) is still 1 cm!)
B = unsigned char (1byte = 8bit Integer)
H = unsigned short (2 byte integer)
I = unsigned 4byte integer
i = signed 4byte integer
Binaries being written:
[POLYGONS:] there are approx. 1k Polygons (evansiroky/timezone-boundary-builder 2017a)
poly_zone_ids: the related zone_id for every polygon ('<H')
poly_coord_amount: the amount of coordinates in every polygon ('<I')
poly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')
poly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)
poly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)
poly_nr2zone_id: the polygon number of the first polygon from every zone('<H')
[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)
hole_poly_ids: the related polygon_nr (=id) for every hole ('<H')
hole_coord_amount: the amount of coordinates in every hole ('<H')
hole_adr2data: address in hole_data.bin where data for every hole starts ('<I')
hole_data: coordinates for every hole (multiple times '<i')
[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.
-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts
shortcut here means storing for every cell in a grid of the world map which polygons are located in that cell
they can therefore be used to drastically reduce the amount of polygons which need to be checked in order to
decide which timezone a point is located in.
the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id
this is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)
shortcuts_entry_amount: the amount of polygons for every shortcut ('<H')
shortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')
shortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')
shortcuts_unique_id: the zone id if only polygons from one zone are present,
a high number (with no corresponding zone) if not ('<H').
the majority of zones either have no polygons at all (sea) or just one zone.
this zone then can be instantly returned without actually testing polygons.
also stored extra binary if only one zone (to directly return that zone without checking)
statistics: (data version 2018g)
maximal amount of coordinates in one polygon: 139130
amount_of_holes: 219
amount of polygons: 1177
shortcut statistics:
highest entry amount is 46
frequencies of entry amounts (from 0 to max entries):
[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
relative accumulated frequencies [%]:
[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
58.92 % of all shortcuts are empty
highest amount of different zones in one shortcut is 7
frequencies of entry amounts (from 0 to max):
[76359, 45555, 6963, 672, 43, 6, 1, 1]
relative accumulated frequencies [%]:
[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]
[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]
--------------------------------
The number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)
The number of polygons is: 1177
The number of floats in all the polygons is (2 per point): 10887056
writing file " poly_nr2zone_id.bin "
Done
writing file " poly_zone_ids.bin "
writing file " poly_max_values.bin "
writing file " poly_data.bin "
writing file " poly_adr2data.bin "
writing file " poly_coord_amount.bin "
writing file " shortcuts_entry_amount.bin "
writing file " shortcuts_adr2data.bin "
writing file " shortcuts_data.bin "
writing file " shortcuts_unique_id.bin "
writing file " hole_poly_ids.bin "
writing file " hole_coord_amount.bin "
writing file " hole_adr2data.bin "
writing file " hole_data.bin "
the polygon data makes up 97.11 % of the data
the shortcuts make up 2.01 % of the data
holes make up 0.88 % of the data
"""
# module-level state: filled by parse_polygons_from_json() / update_zone_names(),
# consumed by compile_binaries()
nr_of_lines = -1  # total number of polygons ("lines"); -1 until parsing has run
all_tz_names = []  # timezone name (tzid) per feature of the input file
poly_zone_ids = []  # zone id for every polygon (ascending)
all_boundaries = []  # bounding box (xmax, xmin, ymax, ymin) per polygon
all_coords = []  # (x_coords, y_coords) coordinate lists per polygon
all_lengths = []  # number of coordinates per polygon
amount_of_holes = 0  # total number of holes found while parsing
polynrs_of_holes = []  # polygon nr each hole belongs to
all_holes = []  # (x_coords, y_coords) coordinate lists per hole
all_hole_lengths = []  # number of coordinates per hole
list_of_pointers = []  # NOTE(review): appears unused in the visible code — confirm before removal
poly_nr2zone_id = []  # polygon nr at which each zone starts (filled in update_zone_names())
shortcuts = {}  # (x, y) shortcut grid cell -> list of polygon nrs located in that cell
def x_shortcut(lng):
    """Map a longitude to its shortcut column index.

    Higher longitude means a higher column: -180 deg -> 0, +180 deg -> 360 * NR_SHORTCUTS_PER_LNG.
    Input is assumed to lie in [-180, 180) — not validated here.
    """
    shifted = lng + 180
    return floor(shifted * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Map a latitude to its shortcut row index.

    Lower latitude means a higher row: +90 deg -> 0, -90 deg -> 180 * NR_SHORTCUTS_PER_LAT.
    Input is assumed to lie in [-90, 90) — not validated here.
    """
    shifted = 90 - lat
    return floor(shifted * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """Return True when a polygon with this bounding box could cover more than 4 shortcut cells."""
    spans_wide = (xmax - xmin) > 2 / NR_SHORTCUTS_PER_LNG
    spans_tall = (ymax - ymin) > 2 / NR_SHORTCUTS_PER_LAT
    return spans_wide and spans_tall
def percent(numerator, denominator):
    """Return numerator/denominator expressed as a percentage, rounded to 2 decimals."""
    fraction = numerator / denominator
    return round(fraction * 100, 2)
def accumulated_frequency(int_list):
    """Return the running (cumulative) sums of int_list as percentages of its total."""
    total = sum(int_list)
    running = 0
    percentages = []
    for value in int_list:
        running += value
        percentages.append(percent(running, total))
    return percentages
def ints_of(line=0):
    """Return the coordinates of polygon nr. ``line`` converted to the int32 representation."""
    x_coords, y_coords = all_coords[line]
    xs = [coord2int(c) for c in x_coords]
    ys = [coord2int(c) for c in y_coords]
    return xs, ys
def contained(x, y, x_coords, y_coords):
    """Return whether point (x, y) lies inside the polygon given by its coordinate lists."""
    coords = [x_coords, y_coords]
    return inside_polygon(x, y, coords)
def unique(iterable):
    """Return the distinct elements of iterable, keeping first-seen order.

    Uses list membership (not a set) so unhashable elements are supported.
    """
    seen = []
    for element in iterable:
        if element in seen:
            continue
        seen.append(element)
    return seen
def point_between(p1, p2):
    """Return the midpoint of the segment from 2D point p1 to p2 as an (x, y) tuple."""
    mid_x = p1[0] + (p2[0] - p1[0]) / 2
    mid_y = p1[1] + (p2[1] - p1[1]) / 2
    return mid_x, mid_y
def get_shortcuts(x, y):
    """Return the list of polygon nrs stored for shortcut cell (x, y); [] when empty."""
    entry = shortcuts.get((x, y))
    return [] if entry is None else entry
def _polygons(id_list):
    """Yield the (x_coords, y_coords) tuples of the polygons with the given ids."""
    for poly_id in id_list:
        yield all_coords[poly_id]
def not_empty(iterable):
    """Return True if iterable yields at least one element (consumes at most one item)."""
    for _ in iterable:
        return True
    return False
def polys_of_one_zone():
    """Yield, zone by zone, the list of polygon nrs belonging to that zone."""
    for zone_nr in range(len(timezone_names)):
        first = poly_nr2zone_id[zone_nr]
        after_last = poly_nr2zone_id[zone_nr + 1]
        yield list(range(first, after_last))
def replace_entry(iterable, entry, substitute):
    """Replace every occurrence of entry in the (indexable) iterable in place and return it."""
    for idx, value in enumerate(iterable):
        if value == entry:
            iterable[idx] = substitute
    return iterable
def _holes_in_poly(poly_nr):
    """Yield the (x_coords, y_coords) of every hole registered for polygon nr. poly_nr."""
    for hole_nr, registered_poly in enumerate(polynrs_of_holes):
        if registered_poly == poly_nr:
            yield all_holes[hole_nr]
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Read the GeoJSON input file and fill the module-level data lists.

    Populates all_tz_names, all_coords, all_lengths, all_boundaries,
    poly_zone_ids and the hole-related lists, then validates that all
    amounts fit into the binary field widths written later.

    :param path: the GeoJSON file (e.g. 'combined.json') to parse
    :raises ValueError: when an amount does not fit its binary data type
        or the parsed data is inconsistent
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids
    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    tz_list = json.loads(open(path).read()).get('features')
    # this counter just counts polygons, not holes!
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get("tzid")
        all_tz_names.append(tz_name)
        geometry = tz_dict.get("geometry")
        if geometry.get('type') == 'MultiPolygon':
            # coordinate nesting depth is 4
            multipolygon = geometry.get("coordinates")
        else:
            # depth is 3 (only one polygon, possibly with holes!) -> wrap to depth 4
            multipolygon = [geometry.get("coordinates")]
        for poly_with_hole in multipolygon:
            # the first ring is the outer polygon boundary
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            # IMPORTANT: do not use the last value (is equal to the first)!
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            all_lengths.append(len(x_coords))
            all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)
            # every remaining ring is interpreted as a hole!
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                # keep track of how many holes there are
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                # IMPORTANT: do not use the last value (is equal to the first)!
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))
            polygon_counter += 1
        current_zone_id += 1
    # validate that every amount fits into the binary field width used for it
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        # 34621 in tz_world 2016d (small enough for int16)
        # 137592 in evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)
        raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',
                         max(all_lengths))
    if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        # 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)
        raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',
                         max(all_hole_lengths))
    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)
    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        # 24k in tz_world 2016d
        # 1022 in evansiroky/timezone-boundary-builder 2017a
        raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',
                         nr_of_lines, 'polygons')
    # NOTE(review): this check uses NR_BYTES_H but the message says int8,
    # and uses > where the others use >= — confirm the intended limit
    if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):
        # 420 different zones in evansiroky/timezone-boundary-builder 2017a
        # used in shortcuts_unique_id and poly_zone_ids
        raise ValueError('zone id cannot be encoded as char (int8). the last id is',
                         poly_zone_ids[-1])
    if 0 in all_lengths:
        raise ValueError()
    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to disk and compute where each zone starts.

    Stores all_tz_names as a JSON list in ``path`` and fills poly_nr2zone_id
    with the polygon nr at which every zone starts, plus one trailing sentinel
    entry (= total polygon count) so zone i spans
    poly_nr2zone_id[i] .. poly_nr2zone_id[i + 1].

    :raises ValueError: if the polygons are not sorted by ascending zone id
    """
    global poly_zone_ids
    global list_of_pointers
    global all_boundaries
    global all_coords
    global all_lengths
    global polynrs_of_holes
    print('updating the zone names in {} now...'.format(path))
    # store the zone names as a JSON list (not a pickle)
    with open(abspath(path), 'w') as f:
        f.write(json.dumps(all_tz_names))
    print('...Done.\n\nComputing where zones start and end...')
    i = 0
    last_id = -1
    for zone_id in poly_zone_ids:
        if zone_id != last_id:
            # a new zone begins at polygon nr i
            poly_nr2zone_id.append(i)
        if zone_id < last_id:
            raise ValueError()
        last_id = zone_id
        i += 1
    poly_nr2zone_id.append(i)
    print('...Done.\n')
def compile_binaries():
    """Compute the shortcut grid and write all binary data files into the current directory."""
    global nr_of_lines
    global shortcuts
    def print_shortcut_statistics():
        """Print frequency statistics about the computed shortcut grid (display only)."""
        frequencies = []
        max_val = max(*nr_of_entries_in_shortcut)
        print('shortcut statistics:')
        print('highest entry amount is', max_val)
        # count, for every possible entry amount, how many shortcuts have it
        while max_val >= 0:
            frequencies.append(nr_of_entries_in_shortcut.count(max_val))
            max_val -= 1
        frequencies.reverse()
        print('frequencies of entry amounts (from 0 to max entries):')
        print(frequencies)
        empty_shortcuts = frequencies[0]
        print('relative accumulated frequencies [%]:')
        acc = accumulated_frequency(frequencies)
        print(acc)
        print([round(100 - x, 2) for x in acc])
        print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\n')
        # same statistics, but per amount of DIFFERENT zones in a shortcut
        amount_of_different_zones = []
        for entry in shortcut_entries:
            registered_zone_ids = []
            for polygon_nr in entry:
                id = poly_zone_ids[polygon_nr]  # NOTE(review): shadows the builtin `id`
                if id not in registered_zone_ids:
                    registered_zone_ids.append(id)
            amount_of_different_zones.append(len(registered_zone_ids))
        frequencies = []
        max_val = max(*amount_of_different_zones)
        print('highest amount of different zones in one shortcut is', max_val)
        while max_val >= 1:
            frequencies.append(amount_of_different_zones.count(max_val))
            max_val -= 1
        # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)
        frequencies.append(empty_shortcuts)
        frequencies.reverse()
        print('frequencies of entry amounts (from 0 to max):')
        print(frequencies)
        print('relative accumulated frequencies [%]:')
        acc = accumulated_frequency(frequencies)
        print(acc)
        print([round(100 - x, 2) for x in acc])
        print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return ((y - y1) * (x2 - x1) / delta_y) + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return ((x - x1) * (y2 - y1) / delta_x) + y1
def x_intersections(y, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if y_coords[i] <= y:
# print('Y1<=y')
if y_coords[iplus1] > y:
# this was a crossing. compute the intersect
# print('Y2>y')
intersects.append(
compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
# print('Y1>y')
if y_coords[iplus1] <= y:
# this was a crossing. compute the intersect
# print('Y2<=y')
intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
def y_intersections(x, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if x_coords[i] <= x:
if x_coords[iplus1] > x:
# this was a crossing. compute the intersect
intersects.append(
compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
if x_coords[iplus1] <= x:
# this was a crossing. compute the intersect
intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
    def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
        """Compute the exact set of shortcut cells (x, y) the polygon nr. `line` touches.

        Instead of simply using the bounding box, the polygon outline is
        intersected with every horizontal and vertical grid line within the
        bounding box, and cells adjacent to those crossings are collected.

        :raises ValueError: when an odd number of crossings is found
            (the outline would not be a closed polygon)
        """
        shortcuts_for_line = set()
        x_longs, y_longs = ints_of(line)
        # close the ring: append the first point again
        y_longs.append(y_longs[0])
        x_longs.append(x_longs[0])
        step = 1 / NR_SHORTCUTS_PER_LAT
        # first: intersect with every horizontal grid line inside the bounding box
        for lat in latitudes_to_check(ymax, ymin):
            intersects = sorted([int2coord(x) for x in
                                 x_intersections(coord2int(lat), x_longs, y_longs)])
            nr_of_intersects = len(intersects)
            if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been accounted')
            for i in range(0, nr_of_intersects, 2):
                possible_longitudes = []
                # collect all the zones between two intersections [in,out,in,out,...]
                iplus = i + 1
                intersection_in = intersects[i]
                intersection_out = intersects[iplus]
                if intersection_in == intersection_out:
                    # the polygon has a point exactly on the border of a shortcut zone here!
                    # only select the top shortcut if it is actually inside the polygon (point a little up is inside)
                    if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,
                                 y_longs):
                        shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))
                    # the bottom shortcut is always selected
                    shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))
                else:
                    # add all the shortcuts for the whole found area of intersection
                    possible_y_shortcut = y_shortcut(lat)
                    # both rows should only be selected when the polygon doesn't stay on the border
                    middle = intersection_in + (intersection_out - intersection_in) / 2
                    if contained(coord2int(middle), coord2int(lat) + 1, x_longs,
                                 y_longs):
                        # NOTE(review): longitudes are stepped by 1/NR_SHORTCUTS_PER_LAT here;
                        # presumably intended as 1/NR_SHORTCUTS_PER_LNG — harmless while both
                        # constants are equal, confirm before changing either
                        while intersection_in < intersection_out:
                            possible_longitudes.append(intersection_in)
                            intersection_in += step
                        possible_longitudes.append(intersection_out)
                        # the shortcut above and below of the intersection should be selected!
                        possible_y_shortcut_min1 = possible_y_shortcut - 1
                        for possible_x_coord in possible_longitudes:
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))
                    else:
                        # polygon does not cross the border!
                        while intersection_in < intersection_out:
                            possible_longitudes.append(intersection_in)
                            intersection_in += step
                        possible_longitudes.append(intersection_out)
                        # only the shortcut above of the intersection should be selected!
                        for possible_x_coord in possible_longitudes:
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
        # second: same procedure with every vertical grid line
        step = 1 / NR_SHORTCUTS_PER_LAT
        for lng in longitudes_to_check(xmax, xmin):
            intersects = sorted([int2coord(y) for y in
                                 y_intersections(coord2int(lng), x_longs, y_longs)])
            nr_of_intersects = len(intersects)
            if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been accounted')
            possible_latitudes = []
            for i in range(0, nr_of_intersects, 2):
                # collect all the zones between two intersections [in,out,in,out,...]
                iplus = i + 1
                intersection_in = intersects[i]
                intersection_out = intersects[iplus]
                if intersection_in == intersection_out:
                    # the polygon has a point exactly on the border of a shortcut here!
                    # only select the left shortcut if it is actually inside the polygon (point a little left is inside)
                    if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,
                                 y_longs):
                        shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))
                    # the right shortcut is always selected
                    shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))
                else:
                    # add all the shortcuts for the whole found area of intersection
                    possible_x_shortcut = x_shortcut(lng)
                    # both columns should only be selected when the polygon doesn't stay on the border
                    middle = intersection_in + (intersection_out - intersection_in) / 2
                    if contained(coord2int(lng) - 1, coord2int(middle), x_longs,
                                 y_longs):
                        while intersection_in < intersection_out:
                            possible_latitudes.append(intersection_in)
                            intersection_in += step
                        possible_latitudes.append(intersection_out)
                        # both shortcuts right and left of the intersection should be selected!
                        possible_x_shortcut_min1 = possible_x_shortcut - 1
                        for possible_latitude in possible_latitudes:
                            shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
                            shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))
                    else:
                        while intersection_in < intersection_out:
                            possible_latitudes.append(intersection_in)
                            intersection_in += step
                        # only the shortcut right of the intersection should be selected!
                        possible_latitudes.append(intersection_out)
                        for possible_latitude in possible_latitudes:
                            shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
        return shortcuts_for_line
def construct_shortcuts():
print('building shortucts...')
print('currently at polygon nr:')
line = 0
for xmax, xmin, ymax, ymin in all_boundaries:
# xmax, xmin, ymax, ymin = boundaries_of(line=line)
if line % 100 == 0:
print(line)
# print([xmax, xmin, ymax, ymin])
column_nrs = included_shortcut_column_nrs(xmax, xmin)
row_nrs = included_shortcut_row_nrs(ymax, ymin)
if big_zone(xmax, xmin, ymax, ymin):
# print('line ' + str(line))
# print('This is a big zone! computing exact shortcuts')
# print('Nr of entries before')
# print(len(column_nrs) * len(row_nrs))
# print('columns and rows before optimisation:')
# print(column_nrs)
# print(row_nrs)
# print(ints_of(line))
# This is a big zone! compute exact shortcuts with the whole polygon points
shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)
# n += len(shortcuts_for_line)
min_x_shortcut = column_nrs[0]
max_x_shortcut = column_nrs[-1]
min_y_shortcut = row_nrs[0]
max_y_shortcut = row_nrs[-1]
shortcuts_to_remove = []
# remove shortcuts from outside the possible/valid area
for x, y in shortcuts_for_line:
if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:
shortcuts_to_remove.append((x, y))
for s in shortcuts_to_remove:
shortcuts_for_line.remove(s)
# print('and after:')
# print(len(shortcuts_for_line))
# print(shortcuts_for_line)
# column_nrs_after = set()
# row_nrs_after = set()
# for x, y in shortcuts_for_line:
# column_nrs_after.add(x)
# row_nrs_after.add(y)
# print(column_nrs_after)
# print(row_nrs_after)
# print(shortcuts_for_line)
if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
raise ValueError(
'there are more shortcuts than before now. there is something wrong with the algorithm!')
if len(shortcuts_for_line) < 3:
raise ValueError('algorithm not valid! less than 3 zones detected (should be at least 3)')
else:
shortcuts_for_line = []
for column_nr in column_nrs:
for row_nr in row_nrs:
shortcuts_for_line.append((column_nr, row_nr))
# print(shortcuts_for_line)
for shortcut in shortcuts_for_line:
shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
line += 1
# print('collected entries:')
# print(n)
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
# there are two floats per coordinate (lng, lat)
nr_of_floats = 2 * sum(all_lengths)
# write number of entries in shortcut field (x,y)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
# TODO write test
# the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id
# this is critical for ruling out zones faster
# (as soon as just polygons of one zone are left this zone can be returned)
# only around 5% of all shortcuts include polygons from more than one zone
# in most of those cases there are only two types of zones (= entries in counted_zones) and one of them
# has only one entry (important to check the zone with one entry first!).
polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
id_freq = [polygon_ids.count(id) for id in polygon_ids]
zipped = list(zip(poly_nrs, polygon_ids, id_freq))
# also make sure polygons with the same zone freq. are ordered after their zone id
# (polygons from different zones should not get mixed up)
sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])
return [x[0] for x in sort] # take only the polygon nrs
    # count how many shortcut addresses will be written:
    # flatten out the shortcuts in one list in the order they are going to be written inside the polygon file
    for x in range(360 * NR_SHORTCUTS_PER_LNG):
        for y in range(180 * NR_SHORTCUTS_PER_LAT):
            try:
                shortcuts_this_entry = shortcuts[(x, y)]
                shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))
                amount_filled_shortcuts += 1
                nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
            except KeyError:
                # empty shortcut cell
                nr_of_entries_in_shortcut.append(0)
    amount_of_shortcuts = len(nr_of_entries_in_shortcut)
    print_shortcut_statistics()
    if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:
        print(amount_of_shortcuts)
        raise ValueError('this number of shortcut zones is wrong')
    print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',
          round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')
    # for every shortcut <H and <I is written (nr of entries and address)
    shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)
    for nr in nr_of_entries_in_shortcut:
        # every line in every shortcut takes up 2bytes
        shortcut_space += NR_BYTES_H * nr
    print('The number of polygons is:', nr_of_lines)
    print('The number of floats in all the polygons is (2 per point):', nr_of_floats)
    # NOTE(review): files below are opened without context managers; consider `with`
    # [POLYGON AREA]
    path = 'poly_nr2zone_id.bin'
    print('writing file', path)
    output_file = open(path, 'wb')
    for zone_id in poly_nr2zone_id:
        output_file.write(pack(b'<H', zone_id))
    output_file.close()
    print('Done\n')
    # write zone_ids
    path = 'poly_zone_ids.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for zone_id in poly_zone_ids:
        output_file.write(pack(b'<H', zone_id))
    output_file.close()
    # write boundary_data
    path = 'poly_max_values.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for xmax, xmin, ymax, ymin in all_boundaries:
        output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))
    output_file.close()
    # write polygon_data, addresses and number of values
    path = 'poly_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    addresses = []
    i = 0
    for x_coords, y_coords in all_coords:
        addresses.append(output_file.tell())
        if all_lengths[i] != len(x_coords):
            raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))
        # for every polygon first all x then all y values are stored
        for x in x_coords:
            output_file.write(pack(b'<i', coord2int(x)))
        for y in y_coords:
            output_file.write(pack(b'<i', coord2int(y)))
        i += 1
    output_file.close()
    path = 'poly_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for adr in addresses:
        output_file.write(pack(b'<I', adr))
    output_file.close()
    path = 'poly_coord_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_lengths:
        output_file.write(pack(b'<I', length))
    output_file.close()
    # [SHORTCUT AREA]
    # write all nr of entries
    path = 'shortcuts_entry_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for nr in nr_of_entries_in_shortcut:
        if nr > 300:
            raise ValueError("There are too many polygons in this shortcut:", nr)
        output_file.write(pack(b'<H', nr))
    output_file.close()
    # write Address of first Polygon_nr in shortcut field (x,y)
    # Attention: 0 is written when no entries are in this shortcut
    adr = 0
    path = 'shortcuts_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for nr in nr_of_entries_in_shortcut:
        if nr == 0:
            output_file.write(pack(b'<I', 0))
        else:
            output_file.write(pack(b'<I', adr))
            # each line_nr takes up 2 bytes of space
            adr += 2 * nr
    output_file.close()
    # write Line_Nrs for every shortcut
    path = 'shortcuts_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for entries in shortcut_entries:
        for entry in entries:
            if entry > nr_of_lines:
                raise ValueError(entry)
            output_file.write(pack(b'<H', entry))
    output_file.close()
    # write corresponding zone id for every shortcut (iff unique)
    path = 'shortcuts_unique_id.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    if poly_zone_ids[-1] >= INVALID_ZONE_ID:
        raise ValueError(
            'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')
    for x in range(360 * NR_SHORTCUTS_PER_LNG):
        for y in range(180 * NR_SHORTCUTS_PER_LAT):
            try:
                shortcuts_this_entry = shortcuts[(x, y)]
                unique_id = poly_zone_ids[shortcuts_this_entry[0]]
                for nr in shortcuts_this_entry:
                    if poly_zone_ids[nr] != unique_id:
                        # there is a polygon from a different zone (hence an invalid id should be written)
                        unique_id = INVALID_ZONE_ID
                        break
                output_file.write(pack(b'<H', unique_id))
            except KeyError:
                # also write an Invalid Id when there is no polygon at all
                output_file.write(pack(b'<H', INVALID_ZONE_ID))
    output_file.close()
    # [HOLE AREA, Y = number of holes (very few: around 22)]
    hole_space = 0
    # '<H' for every hole store the related line
    path = 'hole_poly_ids.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    i = 0
    for line in polynrs_of_holes:
        if line > nr_of_lines:
            raise ValueError(line, nr_of_lines)
        output_file.write(pack(b'<H', line))
        i += 1
    hole_space += output_file.tell()
    output_file.close()
    if i > amount_of_holes:
        raise ValueError('There are more related lines than holes.')
    # '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! x,y in int32 int32) in this hole]
    path = 'hole_coord_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_hole_lengths:
        output_file.write(pack(b'<H', length))
    hole_space += output_file.tell()
    output_file.close()
    # '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]
    adr = 0
    path = 'hole_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_hole_lengths:
        output_file.write(pack(b'<I', adr))
        # each pair of points takes up 8 bytes of space
        adr += 2 * NR_BYTES_I * length
    hole_space += output_file.tell()
    output_file.close()
    # Y times [ 2x i signed ints for every hole: x coords, y coords ]
    # write hole polygon_data
    path = 'hole_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for x_coords, y_coords in all_holes:
        for x in x_coords:
            output_file.write(pack(b'<i', coord2int(x)))
        for y in y_coords:
            output_file.write(pack(b'<i', coord2int(y)))
    hole_space += output_file.tell()
    output_file.close()
    # final size report
    polygon_space = nr_of_floats * NR_BYTES_I
    total_space = polygon_space + hole_space + shortcut_space
    print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')
    print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')
    print('holes make up', percent(hole_space, total_space), '% of the data')
    print('Success!')
    return
if __name__ == '__main__':
    # parse the data from the .json into RAM
    parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)
    # update all the zone names and set the right ids to be written in the poly_zone_ids.bin
    # sort data according to zone_id
    update_zone_names(path=TIMEZONE_NAMES_FILE)
    # IMPORTANT: reload the freshly written timezone names (JSON list);
    # the compilation step below needs the new version
    with open(abspath(join(__file__, pardir, TIMEZONE_NAMES_FILE)), 'r') as f:
        timezone_names = json.loads(f.read())
    # compute shortcuts and write everything into the binaries
    compile_binaries()
|
flexible
|
{
"blob_id": "52e43f795c864340734de2640e3c1a70b05e8ea0",
"index": 7248,
"step-1": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\n<mask token>\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\n<mask token>\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\n<mask token>\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n 
all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\n<mask token>\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\n<mask token>\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\n<mask token>\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in 
multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\n<mask token>\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = 
tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n 
print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! 
there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n 
registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the 
line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = 
y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n 
possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-5": "# -*- coding:utf-8 -*-\nimport json\nfrom datetime import datetime\nfrom math import ceil, floor\nfrom os.path import abspath, join, pardir\nfrom struct import pack\n\nfrom .global_settings import (\n DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n)\n# # # keep in mind: the faster numba optimized helper fct. cannot be used here,\n# # # because numpy classes are not being used at this stage yet!\nfrom .helpers import coord2int, inside_polygon, int2coord\n\n# from helpers import coord2int, inside_polygon, int2coord\n# from global_settings import (\n# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n# )\n\n\n# import sys\n# from os.path import dirname\n#\n# sys.path.insert(0, dirname(__file__))\n# from helpers import coord2int, int2coord, inside_polygon\n\n\n\"\"\"\nTODO write tests\n\nUSE INSTRUCTIONS:\n\n- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases\n- unzip and place the combined.json inside this timezonefinder folder\n- run this file_converter.py as a script until the compilation of the binary files is completed.\n\n\nIMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster\nand it takes lot less space, without loosing too much accuracy (min accuracy (=at the equator) is still 1cm !)\n\nB = unsigned char (1byte = 8bit Integer)\nH = unsigned short (2 byte integer)\nI = unsigned 4byte integer\ni = signed 4byte integer\n\n\nBinaries being written:\n\n[POLYGONS:] there are approx. 
1k Polygons (evansiroky/timezone-boundary-builder 2017a)\npoly_zone_ids: the related zone_id for every polygon ('<H')\npoly_coord_amount: the amount of coordinates in every polygon ('<I')\npoly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')\npoly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)\npoly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)\npoly_nr2zone_id: the polygon number of the first polygon from every zone('<H')\n\n[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)\nhole_poly_ids: the related polygon_nr (=id) for every hole ('<H')\nhole_coord_amount: the amount of coordinates in every hole ('<H')\nhole_adr2data: address in hole_data.bin where data for every hole starts ('<I')\nhole_data: coordinates for every hole (multiple times '<i')\n\n[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.\n-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts\nshortcut here means storing for every cell in a grid of the world map which polygons are located in that cell\nthey can therefore be used to drastically reduce the amount of polygons which need to be checked in order to\ndecide which timezone a point is located in.\n\nthe list of polygon ids in each shortcut is sorted after freq. 
of appearance of their zone id\nthis is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)\n\nshortcuts_entry_amount: the amount of polygons for every shortcut ('<H')\nshortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')\nshortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')\nshortcuts_unique_id: the zone id if only polygons from one zone are present,\n a high number (with no corresponding zone) if not ('<H').\n the majority of zones either have no polygons at all (sea) or just one zone.\n this zone then can be instantly returned without actually testing polygons.\n\nalso stored extra binary if only one zone (to directly return that zone without checking)\n\n\n\nstatistics: (data version 2018g)\n\n\nmaximal amount of coordinates in one polygon: 139130\namount_of_holes: 219\namount of polygons: 1177\n\nshortcut statistics:\nhighest entry amount is 46\nfrequencies of entry amounts (from 0 to max entries):\n[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\nrelative accumulated frequencies [%]:\n[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]\n[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0]\n58.92 % of all shortcuts are empty\n\nhighest amount of different zones in one shortcut is 7\nfrequencies of entry amounts (from 0 to max):\n[76359, 45555, 6963, 672, 43, 
6, 1, 1]\nrelative accumulated frequencies [%]:\n[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]\n[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]\n--------------------------------\n\nThe number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)\nThe number of polygons is: 1177\nThe number of floats in all the polygons is (2 per point): 10887056\nwriting file \" poly_nr2zone_id.bin \"\nDone\n\nwriting file \" poly_zone_ids.bin \"\nwriting file \" poly_max_values.bin \"\nwriting file \" poly_data.bin \"\nwriting file \" poly_adr2data.bin \"\nwriting file \" poly_coord_amount.bin \"\nwriting file \" shortcuts_entry_amount.bin \"\nwriting file \" shortcuts_adr2data.bin \"\nwriting file \" shortcuts_data.bin \"\nwriting file \" shortcuts_unique_id.bin \"\nwriting file \" hole_poly_ids.bin \"\nwriting file \" hole_coord_amount.bin \"\nwriting file \" hole_adr2data.bin \"\nwriting file \" hole_data.bin \"\nthe polygon data makes up 97.11 % of the data\nthe shortcuts make up 2.01 % of the data\nholes make up 0.88 % of the data\n\"\"\"\n\nnr_of_lines = -1\nall_tz_names = []\npoly_zone_ids = []\nall_boundaries = []\nall_coords = []\nall_lengths = []\namount_of_holes = 0\npolynrs_of_holes = []\nall_holes = []\nall_hole_lengths = []\nlist_of_pointers = []\npoly_nr2zone_id = []\nshortcuts = {}\n\n\ndef x_shortcut(lng):\n # higher (=lng) means higher x shortcut!!! 0 (-180deg lng) -> 360 (180deg)\n # if lng < -180 or lng >= 180:\n # raise ValueError('longitude out of bounds', lng)\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n # lower y (=lat) means higher y shortcut!!! 
0 (90deg lat) -> 180 (-90deg)\n # if lat < -90 or lat >= 90:\n # raise ValueError('this latitude is out of bounds', lat)\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n # returns True if a zone with those boundaries could have more than 4 shortcuts\n return xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT\n\n\ndef percent(numerator, denominator):\n return round((numerator / denominator) * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\ndef _polygons(id_list):\n for i in id_list:\n yield all_coords[i]\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n\n print('Parsing data from {}\\nthis could take a 
while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n # this counter just counts polygons, not holes!\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n\n tz_name = tz_dict.get('properties').get(\"tzid\")\n # print(tz_name)\n all_tz_names.append(tz_name)\n geometry = tz_dict.get(\"geometry\")\n if geometry.get('type') == 'MultiPolygon':\n # depth is 4\n multipolygon = geometry.get(\"coordinates\")\n else:\n # depth is 3 (only one polygon, possibly with holes!)\n multipolygon = [geometry.get(\"coordinates\")]\n # multipolygon has depth 4\n # assert depth_of_array(multipolygon) == 4\n for poly_with_hole in multipolygon:\n # assert len(poly_with_hole) > 0\n # the first entry is polygon\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n # assert len(x_coords) > 0\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n\n # everything else is interpreted as a hole!\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n # keep track of how many holes there are\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n\n polygon_counter += 1\n\n current_zone_id += 1\n\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n # 34621 in tz_world 2016d (small enough for int16)\n # 137592 in 
evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)\n raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',\n max(all_lengths))\n\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n # 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)\n raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',\n max(all_hole_lengths))\n\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)\n\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n # 24k in tz_world 2016d\n # 1022 in evansiroky/timezone-boundary-builder 2017a\n raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',\n nr_of_lines, 'polygons')\n\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n # 420 different zones in evansiroky/timezone-boundary-builder 2017a\n # used in shortcuts_unique_id and poly_zone_ids\n raise ValueError('zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n\n if 0 in all_lengths:\n raise ValueError()\n\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n # pickle the zone names (python array)\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\\n')\n\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n\n amount_of_different_zones.append(len(registered_zone_ids))\n\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one 
shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return ((y - y1) * (x2 - x1) / delta_y) + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return ((x - x1) * (y2 - y1) / 
delta_x) + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n # print('Y1<=y')\n if y_coords[iplus1] > y:\n # this was a crossing. compute the intersect\n # print('Y2>y')\n intersects.append(\n compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n # print('Y1>y')\n if y_coords[iplus1] <= y:\n # this was a crossing. compute the intersect\n # print('Y2<=y')\n intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n # this was a crossing. compute the intersect\n intersects.append(\n compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n if x_coords[iplus1] <= x:\n # this was a crossing. 
compute the intersect\n intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n\n # x_longs = binary_reader.x_coords_of(line)\n x_longs, y_longs = ints_of(line)\n\n # y_longs = binary_reader.y_coords_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n\n step = 1 / NR_SHORTCUTS_PER_LAT\n # print('checking the latitudes')\n for lat in latitudes_to_check(ymax, ymin):\n # print(lat)\n # print(coordinate_to_longlong(lat))\n # print(y_longs)\n # print(x_intersections(coordinate_to_longlong(lat), x_longs, y_longs))\n # raise ValueError\n intersects = sorted([int2coord(x) for x in\n x_intersections(coord2int(lat), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut zone here!\n # only select the top shortcut if it is actually inside the polygon (point a little up is inside)\n if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))\n # the bottom shortcut is always selected\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_y_shortcut = y_shortcut(lat)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if 
contained(coord2int(middle), coord2int(lat) + 1, x_longs,\n y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # the shortcut above and below of the intersection should be selected!\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))\n else:\n # polygon does not cross the border!\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # only the shortcut above of the intersection should be selected!\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n\n # print('now all the longitudes to check')\n # same procedure horizontally\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n # print(lng)\n # print(coordinate_to_longlong(lng))\n # print(x_longs)\n # print(x_intersections(coordinate_to_longlong(lng), x_longs, y_longs))\n intersects = sorted([int2coord(y) for y in\n y_intersections(coord2int(lng), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut here!\n # only select the left shortcut if it is actually inside the polygon (point a little left is inside)\n 
if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))\n # the right shortcut is always selected\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_x_shortcut = x_shortcut(lng)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle), x_longs,\n y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n\n possible_latitudes.append(intersection_out)\n\n # both shortcuts right and left of the intersection should be selected!\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))\n\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n # only the shortcut right of the intersection should be selected!\n possible_latitudes.append(intersection_out)\n\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n # xmax, xmin, ymax, ymin = boundaries_of(line=line)\n if line % 100 == 0:\n print(line)\n # print([xmax, xmin, ymax, ymin])\n\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n\n if big_zone(xmax, xmin, ymax, ymin):\n\n # print('line ' + str(line))\n # 
print('This is a big zone! computing exact shortcuts')\n # print('Nr of entries before')\n # print(len(column_nrs) * len(row_nrs))\n # print('columns and rows before optimisation:')\n # print(column_nrs)\n # print(row_nrs)\n # print(ints_of(line))\n\n # This is a big zone! compute exact shortcuts with the whole polygon points\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)\n # n += len(shortcuts_for_line)\n\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n\n # remove shortcuts from outside the possible/valid area\n for x, y in shortcuts_for_line:\n if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:\n shortcuts_to_remove.append((x, y))\n\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n\n # print('and after:')\n # print(len(shortcuts_for_line))\n # print(shortcuts_for_line)\n # column_nrs_after = set()\n # row_nrs_after = set()\n # for x, y in shortcuts_for_line:\n # column_nrs_after.add(x)\n # row_nrs_after.add(y)\n # print(column_nrs_after)\n # print(row_nrs_after)\n # print(shortcuts_for_line)\n\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!')\n if len(shortcuts_for_line) < 3:\n raise ValueError('algorithm not valid! 
less than 3 zones detected (should be at least 3)')\n\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n # print(shortcuts_for_line)\n\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n\n line += 1\n # print('collected entries:')\n # print(n)\n\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n\n # there are two floats per coordinate (lng, lat)\n nr_of_floats = 2 * sum(all_lengths)\n\n # write number of entries in shortcut field (x,y)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n # TODO write test\n # the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id\n # this is critical for ruling out zones faster\n # (as soon as just polygons of one zone are left this zone can be returned)\n # only around 5% of all shortcuts include polygons from more than one zone\n # in most of those cases there are only two types of zones (= entries in counted_zones) and one of them\n # has only one entry (important to check the zone with one entry first!).\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n # also make sure polygons with the same zone freq. 
are ordered after their zone id\n # (polygons from different zones should not get mixed up)\n sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])\n return [x[0] for x in sort] # take only the polygon nrs\n\n # count how many shortcut addresses will be written:\n # flatten out the shortcuts in one list in the order they are going to be written inside the polygon file\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n # print((x,y,this_lines_shortcuts))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n\n if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n\n print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',\n round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')\n\n # for every shortcut <H and <I is written (nr of entries and address)\n shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)\n for nr in nr_of_entries_in_shortcut:\n # every line in every shortcut takes up 2bytes\n shortcut_space += NR_BYTES_H * nr\n\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per point):', nr_of_floats)\n\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n print('Done\\n')\n # write zone_ids\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 
'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n # write boundary_data\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))\n output_file.close()\n\n # write polygon_data, addresses and number of values\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n\n # [SHORTCUT AREA]\n # write all nr of entries\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError(\"There are too many polygons in this shortcut:\", nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n\n # write Address of first Polygon_nr in shortcut field (x,y)\n # Attention: 0 is written when no entries are in this shortcut\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n 
output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n # each line_nr takes up 2 bytes of space\n adr += 2 * nr\n output_file.close()\n\n # write Line_Nrs for every shortcut\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n\n # write corresponding zone id for every shortcut (iff unique)\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n # there is a polygon from a different zone (hence an invalid id should be written)\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n # also write an Invalid Id when there is no polygon at all\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n\n output_file.close()\n # [HOLE AREA, Y = number of holes (very few: around 22)]\n hole_space = 0\n\n # '<H' for every hole store the related line\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n\n # '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! 
x,y in int32 int32) in this hole]\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n\n # '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<I', adr))\n # each pair of points takes up 8 bytes of space\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n\n # Y times [ 2x i signed ints for every hole: x coords, y coords ]\n # write hole polygon_data\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n\n print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\nif __name__ == '__main__':\n # parsing the data from the .json into RAM\n parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)\n # update all the zone names and set the right ids to be written in the poly_zone_ids.bin\n # sort data according to zone_id\n update_zone_names(path=TIMEZONE_NAMES_FILE)\n\n # IMPORTANT: import the newly compiled timezone_names pickle!\n # the compilation process needs the new version of the timezone names\n with open(abspath(join(__file__, pardir, 
TIMEZONE_NAMES_FILE)), 'r') as f:\n timezone_names = json.loads(f.read())\n\n # compute shortcuts and write everything into the binaries\n compile_binaries()\n",
"step-ids": [
11,
13,
15,
17,
22
]
}
|
[
11,
13,
15,
17,
22
] |
<|reserved_special_token_0|>
class IssuanceController(QObject):
def __init__(self, model):
super().__init__()
self._database_controller = DatabaseController()
self._model = model
<|reserved_special_token_0|>
@pyqtSlot(str)
def change_student_id(self, value):
self._model.student_id = value
@pyqtSlot(str)
def change_staff_id(self, value):
self._model.staff_id = value
@pyqtSlot(str)
def change_book_id(self, value):
self._model.book_id = value
<|reserved_special_token_0|>
@pyqtSlot(str)
def change_due_date(self, value):
self._model.due_date = value
@pyqtSlot(bool)
def add(self, value):
self._model.is_add_click = True if value else False
def GetIssuedBooks(self):
try:
mycursor = self._database_controller.CursorTuple()
mycursor.execute(mycursor.execute(
"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0"
))
books = mycursor.fetchall()
return books
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
finally:
self._database_controller.Close()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssuanceController(QObject):
def __init__(self, model):
super().__init__()
self._database_controller = DatabaseController()
self._model = model
<|reserved_special_token_0|>
@pyqtSlot(str)
def change_student_id(self, value):
self._model.student_id = value
@pyqtSlot(str)
def change_staff_id(self, value):
self._model.staff_id = value
@pyqtSlot(str)
def change_book_id(self, value):
self._model.book_id = value
<|reserved_special_token_0|>
@pyqtSlot(str)
def change_due_date(self, value):
self._model.due_date = value
@pyqtSlot(bool)
def add(self, value):
self._model.is_add_click = True if value else False
def GetIssuedBooks(self):
try:
mycursor = self._database_controller.CursorTuple()
mycursor.execute(mycursor.execute(
"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0"
))
books = mycursor.fetchall()
return books
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
finally:
self._database_controller.Close()
<|reserved_special_token_0|>
def UpdateIssuance(self):
try:
mycursor = self._database_controller.Cursor()
sql = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'
val = self._model.issuance_id,
mycursor.execute(sql, val)
self._database_controller.Commit()
except mysql.connector.Error as err:
print('Something went wrong: {}'.format(err))
finally:
self._database_controller.Close()
return mycursor.rowcount, 'record inserted.'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IssuanceController(QObject):
    """Qt controller for book issuance (borrow / list / return).

    The ``pyqtSlot`` setters bind view input into the issuance model; the
    remaining methods run the corresponding MySQL statements through the
    shared DatabaseController.

    NOTE(review): the except clauses reference ``mysql.connector`` but no
    ``import mysql.connector`` is visible in this chunk — confirm the
    module-level import exists, otherwise handling a DB error raises
    NameError.
    """

    def __init__(self, model):
        super().__init__()
        # One DatabaseController per controller instance; the model holds
        # the form fields bound from the view.
        self._database_controller = DatabaseController()
        self._model = model

    @pyqtSlot(str)
    def change_issuance_id(self, value):
        self._model.issuance_id = value

    @pyqtSlot(str)
    def change_student_id(self, value):
        self._model.student_id = value

    @pyqtSlot(str)
    def change_staff_id(self, value):
        self._model.staff_id = value

    @pyqtSlot(str)
    def change_book_id(self, value):
        self._model.book_id = value

    @pyqtSlot(str)
    def change_release_date(self, value):
        self._model.release_date = value

    @pyqtSlot(str)
    def change_due_date(self, value):
        self._model.due_date = value

    @pyqtSlot(bool)
    def add(self, value):
        # Coerce to a plain bool (idiomatic form of `True if value else False`).
        self._model.is_add_click = bool(value)

    def GetIssuedBooks(self):
        """Fetch all issuance rows whose book has not been returned yet.

        Returns:
            List of tuples (issuance_id, student full name, student_id,
            book title, book_id, release_date, due_date, staff full name),
            or None if a database error was caught.
        """
        try:
            mycursor = self._database_controller.CursorTuple()
            # Bug fix: the original nested the call as
            # mycursor.execute(mycursor.execute(sql)), which re-invoked
            # execute() with execute()'s return value as the statement.
            mycursor.execute(
                "SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0"
            )
            return mycursor.fetchall()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()

    def BorrowBook(self):
        """Insert a new issuance row from the current model state.

        The row is created with is_returned = '0' (still borrowed).

        Returns:
            Tuple (rowcount, message).
        """
        try:
            mycursor = self._database_controller.Cursor()
            # Parameterized statement — the driver binds the %s placeholders.
            sql = (
                'INSERT INTO issuance (student_id, staff_id, book_id, release_date, due_date, is_returned) VALUES (%s, %s, %s, %s, %s, %s)'
            )
            val = (self._model.student_id, self._model.staff_id,
                   self._model.book_id, self._model.release_date,
                   self._model.due_date, '0')
            mycursor.execute(sql, val)
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()
        # NOTE(review): if Cursor() itself raised, mycursor is unbound here.
        return mycursor.rowcount, 'record inserted.'

    def UpdateIssuance(self):
        """Mark the issuance identified by the model's issuance_id as
        returned (is_returned = 1).

        Returns:
            Tuple (rowcount, message).
        """
        try:
            mycursor = self._database_controller.Cursor()
            sql = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'
            val = (self._model.issuance_id,)
            mycursor.execute(sql, val)
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()
        # Bug fix: the message previously said 'record inserted.' for an UPDATE.
        return mycursor.rowcount, 'record updated.'
<|reserved_special_token_1|>
from PyQt5.QtCore import QObject, pyqtSlot
from Controllers.BookController import BookController
from Model.BookModel import BookModel
from Controllers.DatabaseController import DatabaseController
class IssuanceController(QObject):
    """Mediates between the issuance model and the library database.

    Slot setters push view input into the model; the remaining methods
    execute the borrow / list / return SQL statements via the shared
    DatabaseController.
    """

    def __init__(self, model):
        super().__init__()
        self._database_controller = DatabaseController()
        self._model = model

    @pyqtSlot(str)
    def change_issuance_id(self, value):
        self._model.issuance_id = value

    @pyqtSlot(str)
    def change_student_id(self, value):
        self._model.student_id = value

    @pyqtSlot(str)
    def change_staff_id(self, value):
        self._model.staff_id = value

    @pyqtSlot(str)
    def change_book_id(self, value):
        self._model.book_id = value

    @pyqtSlot(str)
    def change_release_date(self, value):
        self._model.release_date = value

    @pyqtSlot(str)
    def change_due_date(self, value):
        self._model.due_date = value

    @pyqtSlot(bool)
    def add(self, value):
        self._model.is_add_click = bool(value)

    def GetIssuedBooks(self):
        """Fetch every issuance row still marked as not returned."""
        query = (
            "SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0"
        )
        try:
            cursor = self._database_controller.CursorTuple()
            cursor.execute(cursor.execute(query))
            return cursor.fetchall()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()

    def BorrowBook(self):
        """Create a new issuance record from the current model state."""
        insert_stmt = (
            'INSERT INTO issuance (student_id, staff_id, book_id, '
            'release_date, due_date, is_returned) '
            'VALUES (%s, %s, %s, %s, %s, %s)'
        )
        try:
            cursor = self._database_controller.Cursor()
            values = (
                self._model.student_id,
                self._model.staff_id,
                self._model.book_id,
                self._model.release_date,
                self._model.due_date,
                '0',
            )
            cursor.execute(insert_stmt, values)
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()
        return cursor.rowcount, 'record inserted.'

    def UpdateIssuance(self):
        """Flag the issuance referenced by the model as returned."""
        update_stmt = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'
        try:
            cursor = self._database_controller.Cursor()
            cursor.execute(update_stmt, (self._model.issuance_id,))
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print('Something went wrong: {}'.format(err))
        finally:
            self._database_controller.Close()
        return cursor.rowcount, 'record inserted.'
<|reserved_special_token_1|>
from PyQt5.QtCore import QObject, pyqtSlot
from Controllers.BookController import BookController
from Model.BookModel import BookModel
from Controllers.DatabaseController import DatabaseController
#Issuance Controller class contains the issuance properties and performs database operations for the issuance
class IssuanceController(QObject):
    """Controller for book issuance: binds view input into the issuance
    model and runs the borrow / list / return database operations.

    NOTE(review): the except clauses reference ``mysql.connector`` but no
    import of it is visible in this chunk — confirm the module-level import
    exists, otherwise handling a DB error raises NameError.
    """

    def __init__(self, model):
        super().__init__()
        # Database access object plus the model holding the form state.
        self._database_controller = DatabaseController()
        self._model = model

    # --- pyqtSlot setters: each binds one view field into the model ---

    @pyqtSlot(str)
    def change_issuance_id(self, value):
        self._model.issuance_id = value

    @pyqtSlot(str)
    def change_student_id(self, value):
        self._model.student_id = value

    @pyqtSlot(str)
    def change_staff_id(self, value):
        self._model.staff_id = value

    @pyqtSlot(str)
    def change_book_id(self, value):
        self._model.book_id = value

    @pyqtSlot(str)
    def change_release_date(self, value):
        self._model.release_date = value

    @pyqtSlot(str)
    def change_due_date(self, value):
        self._model.due_date = value

    @pyqtSlot(bool)
    def add(self, value):
        # Records whether the "add" action was triggered in the view.
        self._model.is_add_click = True if value else False

    def GetIssuedBooks(self):
        """Return all issuance rows whose book has not been returned yet
        (is_returned = 0), or None when a database error was caught."""
        try:
            mycursor = self._database_controller.CursorTuple()
            # NOTE(review): execute() is called with the return value of an
            # inner execute() call — this looks like a bug (should be a
            # single execute); left unchanged here.
            mycursor.execute(mycursor.execute("SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0"))
            books = mycursor.fetchall()
            return books
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))
        finally:
            self._database_controller.Close()

    def BorrowBook(self):
        """Insert a new issuance row (is_returned = '0') built from the
        model's current student/staff/book/date fields.

        Returns a (rowcount, message) tuple.
        """
        try:
            mycursor = self._database_controller.Cursor()
            # Parameterized INSERT — the driver binds the %s placeholders.
            sql = "INSERT INTO issuance (student_id, staff_id, book_id, release_date, due_date, is_returned) VALUES (%s, %s, %s, %s, %s, %s)"
            val = (
                self._model.student_id,
                self._model.staff_id,
                self._model.book_id,
                self._model.release_date,
                self._model.due_date,
                '0',
            )
            mycursor.execute(sql, val)
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))
        finally:
            self._database_controller.Close()
        # NOTE(review): if Cursor() itself raised, mycursor is unbound here.
        return mycursor.rowcount, "record inserted."

    def UpdateIssuance(self):
        """Mark the issuance identified by the model's issuance_id as
        returned (is_returned = 1). Returns a (rowcount, message) tuple."""
        try:
            mycursor = self._database_controller.Cursor()
            sql = "UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s"
            val = (
                self._model.issuance_id,
            )
            mycursor.execute(sql, val)
            self._database_controller.Commit()
        except mysql.connector.Error as err:
            print("Something went wrong: {}".format(err))
        finally:
            self._database_controller.Close()
        # NOTE(review): message says "inserted" although this is an UPDATE.
        return mycursor.rowcount, "record inserted."
|
flexible
|
{
"blob_id": "1d4df09256324cce50fad096cdeff289af229728",
"index": 3132,
"step-1": "<mask token>\n\n\nclass IssuanceController(QObject):\n\n def __init__(self, model):\n super().__init__()\n self._database_controller = DatabaseController()\n self._model = model\n <mask token>\n\n @pyqtSlot(str)\n def change_student_id(self, value):\n self._model.student_id = value\n\n @pyqtSlot(str)\n def change_staff_id(self, value):\n self._model.staff_id = value\n\n @pyqtSlot(str)\n def change_book_id(self, value):\n self._model.book_id = value\n <mask token>\n\n @pyqtSlot(str)\n def change_due_date(self, value):\n self._model.due_date = value\n\n @pyqtSlot(bool)\n def add(self, value):\n self._model.is_add_click = True if value else False\n\n def GetIssuedBooks(self):\n try:\n mycursor = self._database_controller.CursorTuple()\n mycursor.execute(mycursor.execute(\n \"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0\"\n ))\n books = mycursor.fetchall()\n return books\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass IssuanceController(QObject):\n\n def __init__(self, model):\n super().__init__()\n self._database_controller = DatabaseController()\n self._model = model\n <mask token>\n\n @pyqtSlot(str)\n def change_student_id(self, value):\n self._model.student_id = value\n\n @pyqtSlot(str)\n def change_staff_id(self, value):\n self._model.staff_id = value\n\n @pyqtSlot(str)\n def change_book_id(self, value):\n self._model.book_id = value\n <mask token>\n\n @pyqtSlot(str)\n def change_due_date(self, value):\n self._model.due_date = value\n\n @pyqtSlot(bool)\n def add(self, value):\n self._model.is_add_click = True if value else False\n\n def GetIssuedBooks(self):\n try:\n mycursor = self._database_controller.CursorTuple()\n mycursor.execute(mycursor.execute(\n \"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0\"\n ))\n books = mycursor.fetchall()\n return books\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n <mask token>\n\n def UpdateIssuance(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'\n val = self._model.issuance_id,\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, 'record inserted.'\n",
"step-3": "<mask token>\n\n\nclass IssuanceController(QObject):\n\n def __init__(self, model):\n super().__init__()\n self._database_controller = DatabaseController()\n self._model = model\n\n @pyqtSlot(str)\n def change_issuance_id(self, value):\n self._model.issuance_id = value\n\n @pyqtSlot(str)\n def change_student_id(self, value):\n self._model.student_id = value\n\n @pyqtSlot(str)\n def change_staff_id(self, value):\n self._model.staff_id = value\n\n @pyqtSlot(str)\n def change_book_id(self, value):\n self._model.book_id = value\n\n @pyqtSlot(str)\n def change_release_date(self, value):\n self._model.release_date = value\n\n @pyqtSlot(str)\n def change_due_date(self, value):\n self._model.due_date = value\n\n @pyqtSlot(bool)\n def add(self, value):\n self._model.is_add_click = True if value else False\n\n def GetIssuedBooks(self):\n try:\n mycursor = self._database_controller.CursorTuple()\n mycursor.execute(mycursor.execute(\n \"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0\"\n ))\n books = mycursor.fetchall()\n return books\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n\n def BorrowBook(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = (\n 'INSERT INTO issuance (student_id, staff_id, book_id, release_date, due_date, is_returned) VALUES (%s, %s, %s, %s, %s, %s)'\n )\n val = (self._model.student_id, self._model.staff_id, self.\n _model.book_id, self._model.release_date, self._model.\n due_date, '0')\n 
mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, 'record inserted.'\n\n def UpdateIssuance(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'\n val = self._model.issuance_id,\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, 'record inserted.'\n",
"step-4": "from PyQt5.QtCore import QObject, pyqtSlot\nfrom Controllers.BookController import BookController\nfrom Model.BookModel import BookModel\nfrom Controllers.DatabaseController import DatabaseController\n\n\nclass IssuanceController(QObject):\n\n def __init__(self, model):\n super().__init__()\n self._database_controller = DatabaseController()\n self._model = model\n\n @pyqtSlot(str)\n def change_issuance_id(self, value):\n self._model.issuance_id = value\n\n @pyqtSlot(str)\n def change_student_id(self, value):\n self._model.student_id = value\n\n @pyqtSlot(str)\n def change_staff_id(self, value):\n self._model.staff_id = value\n\n @pyqtSlot(str)\n def change_book_id(self, value):\n self._model.book_id = value\n\n @pyqtSlot(str)\n def change_release_date(self, value):\n self._model.release_date = value\n\n @pyqtSlot(str)\n def change_due_date(self, value):\n self._model.due_date = value\n\n @pyqtSlot(bool)\n def add(self, value):\n self._model.is_add_click = True if value else False\n\n def GetIssuedBooks(self):\n try:\n mycursor = self._database_controller.CursorTuple()\n mycursor.execute(mycursor.execute(\n \"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0\"\n ))\n books = mycursor.fetchall()\n return books\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n\n def BorrowBook(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = (\n 'INSERT INTO issuance (student_id, staff_id, book_id, release_date, due_date, 
is_returned) VALUES (%s, %s, %s, %s, %s, %s)'\n )\n val = (self._model.student_id, self._model.staff_id, self.\n _model.book_id, self._model.release_date, self._model.\n due_date, '0')\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, 'record inserted.'\n\n def UpdateIssuance(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = 'UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s'\n val = self._model.issuance_id,\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print('Something went wrong: {}'.format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, 'record inserted.'\n",
"step-5": "from PyQt5.QtCore import QObject, pyqtSlot\nfrom Controllers.BookController import BookController\nfrom Model.BookModel import BookModel\nfrom Controllers.DatabaseController import DatabaseController\n\n#Issuance Controller class contains the issuance properties and performs database operations for the issuance\nclass IssuanceController(QObject):\n\n def __init__(self, model):\n super().__init__()\n self._database_controller = DatabaseController()\n self._model = model\n\n @pyqtSlot(str)\n def change_issuance_id(self, value):\n self._model.issuance_id = value\n\n @pyqtSlot(str)\n def change_student_id(self, value):\n self._model.student_id = value\n\n @pyqtSlot(str)\n def change_staff_id(self, value):\n self._model.staff_id = value\n\n @pyqtSlot(str)\n def change_book_id(self, value):\n self._model.book_id = value\n\n @pyqtSlot(str)\n def change_release_date(self, value):\n self._model.release_date = value\n\n @pyqtSlot(str)\n def change_due_date(self, value):\n self._model.due_date = value\n\n @pyqtSlot(bool)\n def add(self, value):\n self._model.is_add_click = True if value else False\n\n def GetIssuedBooks(self):\n try:\n mycursor = self._database_controller.CursorTuple()\n mycursor.execute(mycursor.execute(\"SELECT issuance_id, CONCAT(student.first_name, ' ', student.middle_name, ' ', student.last_name) AS full_name, student.student_id, book.title, issuance.book_id, issuance.release_date, issuance.due_date, CONCAT(staff.first_name, ' ', staff.middle_name, ' ', staff.last_name) AS staff_name FROM issuance LEFT JOIN book ON book.book_id = issuance.book_id LEFT JOIN student ON student.student_id = issuance.student_id LEFT JOIN staff ON staff.staff_id = issuance.staff_id WHERE issuance.is_returned = 0\"))\n books = mycursor.fetchall()\n return books\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n finally:\n self._database_controller.Close()\n\n def BorrowBook(self):\n try:\n mycursor = 
self._database_controller.Cursor()\n sql = \"INSERT INTO issuance (student_id, staff_id, book_id, release_date, due_date, is_returned) VALUES (%s, %s, %s, %s, %s, %s)\"\n val = (\n self._model.student_id, \n self._model.staff_id, \n self._model.book_id, \n self._model.release_date, \n self._model.due_date,\n '0', \n )\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, \"record inserted.\"\n\n def UpdateIssuance(self):\n try:\n mycursor = self._database_controller.Cursor()\n sql = \"UPDATE issuance SET is_returned = 1 WHERE issuance_id = %s\"\n val = (\n self._model.issuance_id,\n )\n mycursor.execute(sql, val)\n self._database_controller.Commit()\n except mysql.connector.Error as err:\n print(\"Something went wrong: {}\".format(err))\n finally:\n self._database_controller.Close()\n return mycursor.rowcount, \"record inserted.\"\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
class Person(models.Model):
    # Extra profile data attached one-to-one to Django's built-in auth User;
    # reachable from a User instance as `user.person` via related_name.
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='person')
    age = models.PositiveSmallIntegerField()  # non-negative small int; no domain upper bound enforced here
    bio = models.CharField(max_length=255)  # short free-text biography
class Brand(models.Model):
    # A product brand with display assets.
    name = models.CharField(max_length=255)
    # NOTE(review): coverImage/logo look like URL or path strings (CharField,
    # not ImageField) — confirm the expected format with the frontend callers.
    coverImage = models.CharField(max_length=360)
    logo = models.CharField(max_length=360)
class Product(models.Model):
    """A sellable item belonging to a Brand, optionally attributed to a User."""
    title = models.CharField(max_length=32)
    description = models.TextField(max_length=360)
    price = models.IntegerField()  # NOTE(review): integer price — presumably whole currency units; confirm
    image = models.CharField(max_length=255, null=True)  # presumably an image URL/path — confirm with callers
    brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
    # SET_NULL keeps the product when its creating user account is deleted.
    user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)

    def no_of_ratings(self):
        """Return how many Rating rows reference this product."""
        return len(Rating.objects.filter(product=self))

    def avg_rating(self):
        """Return the mean star value of this product's ratings, or 0 if unrated."""
        # Materialize the star values once: evaluates the queryset a single
        # time and avoids shadowing the builtin `sum` with a local accumulator.
        stars = [rating.stars for rating in Rating.objects.filter(product=self)]
        if not stars:
            return 0
        return sum(stars) / len(stars)
class Rating(models.Model):
    # A single 1-5 star rating given by one user to one product.
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    stars = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])

    class Meta:
        # NOTE(review): (('user', 'product')) is just ('user', 'product') — a
        # single field pair, which Django accepts; each user rates a product once.
        unique_together = (('user', 'product'))
        # NOTE(review): index_together is deprecated in recent Django versions
        # in favor of Meta.indexes — confirm the project's Django version
        # before changing, since altering this generates a migration.
        index_together = (('user', 'product'))
|
normal
|
{
"blob_id": "6de9fffd91d2f7602f7c681253211077704ba8c4",
"index": 2039,
"step-1": "<mask token>\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=32)\n description = models.TextField(max_length=360)\n price = models.IntegerField()\n image = models.CharField(max_length=255, null=True)\n brand = models.ForeignKey(Brand, on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n\n def no_of_ratings(self):\n ratings = Rating.objects.filter(product=self)\n return len(ratings)\n\n def avg_rating(self):\n sum = 0\n ratings = Rating.objects.filter(product=self)\n for rating in ratings:\n sum += rating.stars\n if len(ratings) > 0:\n return sum / len(ratings)\n else:\n return 0\n\n\nclass Rating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stars = models.IntegerField(validators=[MinValueValidator(1),\n MaxValueValidator(5)])\n\n\n class Meta:\n unique_together = 'user', 'product'\n index_together = 'user', 'product'\n",
"step-2": "<mask token>\n\n\nclass Brand(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=32)\n description = models.TextField(max_length=360)\n price = models.IntegerField()\n image = models.CharField(max_length=255, null=True)\n brand = models.ForeignKey(Brand, on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n\n def no_of_ratings(self):\n ratings = Rating.objects.filter(product=self)\n return len(ratings)\n\n def avg_rating(self):\n sum = 0\n ratings = Rating.objects.filter(product=self)\n for rating in ratings:\n sum += rating.stars\n if len(ratings) > 0:\n return sum / len(ratings)\n else:\n return 0\n\n\nclass Rating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stars = models.IntegerField(validators=[MinValueValidator(1),\n MaxValueValidator(5)])\n\n\n class Meta:\n unique_together = 'user', 'product'\n index_together = 'user', 'product'\n",
"step-3": "<mask token>\n\n\nclass Person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Brand(models.Model):\n name = models.CharField(max_length=255)\n coverImage = models.CharField(max_length=360)\n logo = models.CharField(max_length=360)\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=32)\n description = models.TextField(max_length=360)\n price = models.IntegerField()\n image = models.CharField(max_length=255, null=True)\n brand = models.ForeignKey(Brand, on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n\n def no_of_ratings(self):\n ratings = Rating.objects.filter(product=self)\n return len(ratings)\n\n def avg_rating(self):\n sum = 0\n ratings = Rating.objects.filter(product=self)\n for rating in ratings:\n sum += rating.stars\n if len(ratings) > 0:\n return sum / len(ratings)\n else:\n return 0\n\n\nclass Rating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stars = models.IntegerField(validators=[MinValueValidator(1),\n MaxValueValidator(5)])\n\n\n class Meta:\n unique_together = 'user', 'product'\n index_together = 'user', 'product'\n",
"step-4": "<mask token>\n\n\nclass Person(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE,\n related_name='person')\n age = models.PositiveSmallIntegerField()\n bio = models.CharField(max_length=255)\n\n\nclass Brand(models.Model):\n name = models.CharField(max_length=255)\n coverImage = models.CharField(max_length=360)\n logo = models.CharField(max_length=360)\n\n\nclass Product(models.Model):\n title = models.CharField(max_length=32)\n description = models.TextField(max_length=360)\n price = models.IntegerField()\n image = models.CharField(max_length=255, null=True)\n brand = models.ForeignKey(Brand, on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n\n def no_of_ratings(self):\n ratings = Rating.objects.filter(product=self)\n return len(ratings)\n\n def avg_rating(self):\n sum = 0\n ratings = Rating.objects.filter(product=self)\n for rating in ratings:\n sum += rating.stars\n if len(ratings) > 0:\n return sum / len(ratings)\n else:\n return 0\n\n\nclass Rating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stars = models.IntegerField(validators=[MinValueValidator(1),\n MaxValueValidator(5)])\n\n\n class Meta:\n unique_together = 'user', 'product'\n index_together = 'user', 'product'\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\n\nclass Person(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='person')\n age = models.PositiveSmallIntegerField()\n bio = models.CharField(max_length=255)\n\nclass Brand(models.Model):\n name = models.CharField(max_length=255)\n coverImage = models.CharField(max_length=360)\n logo = models.CharField(max_length=360)\n\nclass Product(models.Model):\n title = models.CharField(max_length=32)\n description = models.TextField(max_length=360)\n price = models.IntegerField()\n image = models.CharField(max_length=255, null=True)\n brand = models.ForeignKey(Brand, on_delete=models.CASCADE)\n user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)\n\n def no_of_ratings(self):\n ratings = Rating.objects.filter(product=self)\n return len(ratings)\n\n def avg_rating(self):\n sum = 0\n ratings = Rating.objects.filter(product=self)\n for rating in ratings:\n sum += rating.stars\n if len(ratings) > 0:\n return sum / len(ratings)\n else:\n return 0\n\nclass Rating(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n stars = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(5)])\n\n class Meta:\n unique_together = (('user', 'product'))\n index_together = (('user', 'product'))\n",
"step-ids": [
6,
7,
9,
10,
12
]
}
|
[
6,
7,
9,
10,
12
] |
import os, subprocess
def greet(name):
    """Return a greeting naming *name*, the host machine, and the parent PID.

    The trailing newline emitted by the ``hostname`` command is dropped
    before formatting.
    """
    raw_host = subprocess.check_output("hostname").decode("utf-8")
    return f"Hello, {name}! I'm {raw_host[:-1]}#{os.getppid()}."
|
normal
|
{
"blob_id": "9bd55a2f224acfa2cb34d0ca14a25e8864d644b3",
"index": 5250,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-3": "import os, subprocess\n\n\ndef greet(name):\n hostname = subprocess.check_output('hostname').decode('utf-8')[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-4": "import os, subprocess\n\ndef greet(name):\n hostname = subprocess.check_output(\"hostname\").decode(\"utf-8\")[:-1]\n return \"Hello, {}! I'm {}#{}.\".format(name, hostname, os.getppid())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sns.set()
<|reserved_special_token_0|>
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sns.set()
<|reserved_special_token_0|>
df = pd.read_csv(
'/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'
, index_col=0)
MG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',
'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]
VCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',
'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',
'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',
'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',
'ORA-5B-416', 'ORA-5B-417']]
FG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']
]
FGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',
'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',
'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',
'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider
df = pd.read_csv(
'/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'
, index_col=0)
MG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',
'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]
VCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',
'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',
'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',
'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',
'ORA-5B-416', 'ORA-5B-417']]
FG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']
]
FGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',
'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',
'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',
'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]
MG.pyroplot.spider(color='green', alpha=0.5, mode='fill')
VCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')
FG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')
FGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')
sns.set_style('darkgrid')
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:36:13 2019

@author: gennachiaro

Plot normalized rare-earth-element (REE) spider diagrams for four Ora
Caldera sample groups (MG, VCCR, FG, FGCP), one filled envelope per
group, using pyrolite's pyroplot accessor.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider
# Read the mean-normalized REE table; the first CSV column (the sample ID)
# becomes the index so sample groups can be selected by label below.
df = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)
# Partition samples into groups by their sample-ID labels.
MG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]
VCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]
FG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]
FGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]
# One spider diagram per group; mode="fill" with alpha=0.5 draws each group
# as a semi-transparent filled region (see pyrolite's spider API).
MG.pyroplot.spider(color="green",alpha = 0.5, mode = "fill")
VCCR.pyroplot.spider(color="red",alpha = 0.5, mode = "fill")
FG.pyroplot.spider(color="purple",alpha = 0.5, mode = "fill")
FGCP.pyroplot.spider(color="blue",alpha = 0.5, mode = "fill")
# Apply the seaborn dark-grid theme before rendering.
sns.set_style("darkgrid")
# Display all created figures.
plt.show()
|
flexible
|
{
"blob_id": "f6fee18898636ad6b0dc6d96d28dead4e09b8035",
"index": 1650,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.set()\n<mask token>\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-3": "<mask token>\nsns.set()\n<mask token>\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 18 13:36:13 2019\n\n@author: gennachiaro\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\n\n#read in data\ndf = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)\n\n#set values\nMG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]\nVCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]\nFG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]\nFGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]\n\n#plot diagrams\nMG.pyroplot.spider(color=\"green\",alpha = 0.5, mode = \"fill\")\n\nVCCR.pyroplot.spider(color=\"red\",alpha = 0.5, mode = \"fill\")\n\nFG.pyroplot.spider(color=\"purple\",alpha = 0.5, mode = \"fill\")\n\nFGCP.pyroplot.spider(color=\"blue\",alpha = 0.5, mode = \"fill\")\n\n\n#set background\nsns.set_style(\"darkgrid\")\n\n\n#plot graph\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
def create_map(rows):
    """Convert raw text lines into a 2-D grid (list of char lists).

    The last character of every line — the trailing newline left by
    ``readlines`` — is dropped before the line is split into cells.
    """
    return [list(line[:-1]) for line in rows]
def print_map(chart):
    """Print the maze, one row (rendered as a Python list) per line."""
    for row in chart:
        print(row)
def find_start(chart):
    """Return the (row, col) of the start marker 'S', or None if absent."""
    for row_idx, row in enumerate(chart):
        for col_idx, cell in enumerate(row):
            if cell == 'S':
                return (row_idx, col_idx)
def find_exit(y, x, chart, path):
    """Recursively flood-fill the maze from (y, x), marking reachable cells.

    Mutates *chart* in place: a reachable interior blank becomes '0'
    (visited), and a blank lying on the outer border becomes 'E' (exit).
    *path* records, for each newly visited cell (row, col), the [row, col]
    it was reached from, so the route can be walked backwards afterwards.

    NOTE(review): each `return` below only ends the current recursion
    branch — sibling branches started earlier keep exploring, so several
    border cells may end up marked 'E' if the maze has multiple openings.
    """
    h = len(chart)  # number of rows
    w = len(chart[0])  # number of columns (assumes a rectangular maze)
    # left neighbour: if it sits on the border column (index 0), it is an exit
    if x-1 == 0 and chart[y][x-1] == ' ':
        chart[y][x-1] = 'E'
        path[(y, x-1)] = [y, x]
        return
    elif x-1 > 0 and chart[y][x-1] == ' ':
        chart[y][x-1] = '0'
        path[(y, x - 1)] = [y, x]
        find_exit(y, x-1, chart, path)
    # up neighbour: border row 0 means exit
    if y-1 == 0 and chart[y-1][x] == ' ':
        chart[y-1][x] = 'E'
        path[(y-1, x)] = [y, x]
        return
    elif y-1 > 0 and chart[y-1][x] == ' ':
        chart[y-1][x] = '0'
        path[(y - 1, x)] = [y, x]
        find_exit(y-1, x, chart, path)
    # right neighbour: border column w-1 means exit
    if x+1 == w-1 and chart[y][x+1] == ' ':
        chart[y][x+1] = 'E'
        path[(y, x+1)] = [y, x]
        return
    elif x+1 < w - 1 and chart[y][x+1] == ' ':
        chart[y][x+1] = '0'
        path[(y, x + 1)] = [y, x]
        find_exit(y, x+1, chart, path)
    # down neighbour: border row h-1 means exit
    if y+1 == h-1 and chart[y+1][x] == ' ':
        chart[y+1][x] = 'E'
        path[(y+1, x)] = [y, x]
        return
    elif y+1 < h - 1 and chart[y+1][x] == ' ':
        chart[y+1][x] = '0'
        path[(y + 1, x)] = [y, x]
        find_exit(y+1, x, chart, path)
def check_exit(chart):
    """Scan the grid border for an 'E' marker.

    Returns (True, y, x) for the first exit found — top/bottom rows are
    scanned column by column first, then the left/right columns row by
    row — or (False, -1, -1) when no exit exists.
    """
    height = len(chart)
    width = len(chart[0])
    # Candidate border cells in the same priority order as the
    # original pairwise scan.
    border = [(r, x) for x in range(width) for r in (0, height - 1)]
    border += [(y, c) for y in range(height) for c in (0, width - 1)]
    for y, x in border:
        if chart[y][x] == 'E':
            return True, y, x
    return False, -1, -1
if __name__ == '__main__':
    # Read the labyrinth layout, one text line per maze row.  A context
    # manager guarantees the file is closed even if readlines() raises,
    # unlike the bare open()/close() pair.
    with open('../00_text_files/01_labyrinth.txt', 'rt') as file:
        labyrinth = file.readlines()
    maze = create_map(labyrinth)
    # Mark the start cell as visited before flooding outwards.
    start = find_start(maze)
    maze[start[0]][start[1]] = '0'
    path = {}
    find_exit(start[0], start[1], maze, path)
    print_map(maze)

    ex = check_exit(maze)
    if ex[0]:
        # Walk the predecessor map back from the exit towards the start,
        # printing each step of the escape route.
        y = ex[1]
        x = ex[2]
        print([y, x, maze[y][x]])
        while True:
            coord = (y, x)
            if coord in path:
                y, x = path[coord]
                print([y, x, maze[y][x]])
            else:
                break
    else:
        print("NO WAY")
|
normal
|
{
"blob_id": "bde37f3b41c810ab465de5e0ae374703af9f01f3",
"index": 9033,
"step-1": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\n<mask token>\n",
"step-2": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\n<mask token>\n",
"step-3": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height - 1][x]\n if v == 'E':\n return True, height - 1, x\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width - 1]\n if v == 'E':\n return True, y, width - 1\n return False, -1, -1\n\n\n<mask token>\n",
"step-4": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height - 1][x]\n if v == 'E':\n return True, height - 1, x\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width - 1]\n if v == 'E':\n return True, y, width - 1\n return False, -1, -1\n\n\nif __name__ == '__main__':\n file = open('../00_text_files/01_labyrinth.txt', 'rt')\n labyrinth = file.readlines()\n file.close()\n maze = create_map(labyrinth)\n start = find_start(maze)\n 
maze[start[0]][start[1]] = '0'\n path = {}\n find_exit(start[0], start[1], maze, path)\n print_map(maze)\n ex = check_exit(maze)\n if ex[0]:\n y = ex[1]\n x = ex[2]\n print([y, x, maze[y][x]])\n while True:\n coord = y, x\n if coord in path:\n y, x = path[coord]\n print([y, x, maze[y][x]])\n else:\n break\n else:\n print('NO WAY')\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\ndef create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return (y, x)\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n # left\n if x-1 == 0 and chart[y][x-1] == ' ':\n chart[y][x-1] = 'E'\n path[(y, x-1)] = [y, x]\n return\n elif x-1 > 0 and chart[y][x-1] == ' ':\n chart[y][x-1] = '0'\n path[(y, x - 1)] = [y, x]\n find_exit(y, x-1, chart, path)\n # up\n if y-1 == 0 and chart[y-1][x] == ' ':\n chart[y-1][x] = 'E'\n path[(y-1, x)] = [y, x]\n return\n elif y-1 > 0 and chart[y-1][x] == ' ':\n chart[y-1][x] = '0'\n path[(y - 1, x)] = [y, x]\n find_exit(y-1, x, chart, path)\n # right\n if x+1 == w-1 and chart[y][x+1] == ' ':\n chart[y][x+1] = 'E'\n path[(y, x+1)] = [y, x]\n return\n elif x+1 < w - 1 and chart[y][x+1] == ' ':\n chart[y][x+1] = '0'\n path[(y, x + 1)] = [y, x]\n find_exit(y, x+1, chart, path)\n # down\n if y+1 == h-1 and chart[y+1][x] == ' ':\n chart[y+1][x] = 'E'\n path[(y+1, x)] = [y, x]\n return\n elif y+1 < h - 1 and chart[y+1][x] == ' ':\n chart[y+1][x] = '0'\n path[(y + 1, x)] = [y, x]\n find_exit(y+1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height-1][x]\n if v == 'E':\n return True, height-1, x\n\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width-1]\n if v == 'E':\n return True, y, width-1\n\n return False, -1, -1\n\n\nif __name__ == '__main__':\n file = open('../00_text_files/01_labyrinth.txt', 'rt')\n labyrinth = file.readlines()\n file.close()\n maze = create_map(labyrinth)\n start = find_start(maze)\n 
maze[start[0]][start[1]] = '0'\n path = {}\n find_exit(start[0], start[1], maze, path)\n print_map(maze)\n\n ex = check_exit(maze)\n if ex[0]:\n y = ex[1]\n x = ex[2]\n print([y, x, maze[y][x]])\n while True:\n coord = (y, x)\n if coord in path:\n y, x = path[coord]\n print([y, x, maze[y][x]])\n else:\n break\n else:\n print(\"NO WAY\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(85):
so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
so2_only_masked[:, i, :] = so2_only[:, i, :] * mask
<|reserved_special_token_0|>
plt.rcParams.update(params)
<|reserved_special_token_0|>
fig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,
fontweight='bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=
'vertical', fontsize=35, fontweight='bold')
<|reserved_special_token_0|>
for n in range(6):
ax1 = fig.add_subplot(gs[n, 0])
ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +
1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax1.set_xlim([25, 85])
ax1.set_ylim([5, 20])
ax1.grid(which='minor', axis='y', alpha=0.2)
ax1.grid(which='minor', axis='x', alpha=0.2)
ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',
fontsize=25)
ax2 = fig.add_subplot(gs[n, 1])
ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,
:, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'
)
ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')
ax2.set_xlim([25, 85])
ax2.set_ylim([5, 20])
ax2.grid(which='minor', axis='y', alpha=0.2)
ax2.grid(which='minor', axis='x', alpha=0.2)
ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)
ax3 = fig.add_subplot(gs[n, 2])
cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,
:, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax3.set_xlim([25, 85])
ax3.set_ylim([5, 20])
ax3.grid(which='minor', axis='y', alpha=0.2)
ax3.grid(which='minor', axis='x', alpha=0.2)
ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',
fontsize=25)
cax = fig.add_subplot(gs[:, -1])
plt.colorbar(cb, cax=cax, orientation='vertical', label=
'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')
i = i + 4
plt.savefig('Figure10.png', dpi=300)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
caliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'
)
caliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')
so2_ash = np.load(
'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'
)
so2_only = np.load(
'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'
)
model_alts = np.load('Model_altitude.npy')
model_alts[0] = 0
model_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')
caliop_mask = np.nanmean(caliop, axis=(1, 2))
mask = np.ones((181, 12))
mask[np.isnan(caliop_mask)] = np.nan
so2_ash_masked = np.zeros((181, 85, 12))
so2_only_masked = np.zeros((181, 85, 12))
for i in range(85):
so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
so2_only_masked[:, i, :] = so2_only[:, i, :] * mask
masked_tph = model_tph * mask
alts1 = np.linspace(-500, 20200, 346)
alts2 = np.linspace(20380, 29740, 53)
caliop_alts = np.hstack((alts1, alts2)) / 1000
latitude = range(-90, 91)
months = calendar.month_name[6:13] + calendar.month_name[1:6]
caliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)
caliop_monthly_tph = np.nanmean(caliop_tph, axis=1)
params = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,
'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,
'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,
'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':
5, 'ytick.minor.visible': True, 'lines.linewidth': 4}
plt.rcParams.update(params)
fig = plt.figure(figsize=(37, 38))
gs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])
fig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,
fontweight='bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=
'vertical', fontsize=35, fontweight='bold')
col_map = mpl_cm.get_cmap('plasma')
lvs = np.linspace(0, 1.2, 13)
norm = colors.BoundaryNorm(lvs, col_map.N)
i = 1
for n in range(6):
ax1 = fig.add_subplot(gs[n, 0])
ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +
1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax1.set_xlim([25, 85])
ax1.set_ylim([5, 20])
ax1.grid(which='minor', axis='y', alpha=0.2)
ax1.grid(which='minor', axis='x', alpha=0.2)
ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',
fontsize=25)
ax2 = fig.add_subplot(gs[n, 1])
ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,
:, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'
)
ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')
ax2.set_xlim([25, 85])
ax2.set_ylim([5, 20])
ax2.grid(which='minor', axis='y', alpha=0.2)
ax2.grid(which='minor', axis='x', alpha=0.2)
ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)
ax3 = fig.add_subplot(gs[n, 2])
cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,
:, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax3.set_xlim([25, 85])
ax3.set_ylim([5, 20])
ax3.grid(which='minor', axis='y', alpha=0.2)
ax3.grid(which='minor', axis='x', alpha=0.2)
ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',
fontsize=25)
cax = fig.add_subplot(gs[:, -1])
plt.colorbar(cb, cax=cax, orientation='vertical', label=
'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')
i = i + 4
plt.savefig('Figure10.png', dpi=300)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import matplotlib.pyplot as plt
import calendar
import matplotlib.colors as colors
import matplotlib.cm as mpl_cm
caliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'
)
caliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')
so2_ash = np.load(
'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'
)
so2_only = np.load(
'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'
)
model_alts = np.load('Model_altitude.npy')
model_alts[0] = 0
model_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')
caliop_mask = np.nanmean(caliop, axis=(1, 2))
mask = np.ones((181, 12))
mask[np.isnan(caliop_mask)] = np.nan
so2_ash_masked = np.zeros((181, 85, 12))
so2_only_masked = np.zeros((181, 85, 12))
for i in range(85):
so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
so2_only_masked[:, i, :] = so2_only[:, i, :] * mask
masked_tph = model_tph * mask
alts1 = np.linspace(-500, 20200, 346)
alts2 = np.linspace(20380, 29740, 53)
caliop_alts = np.hstack((alts1, alts2)) / 1000
latitude = range(-90, 91)
months = calendar.month_name[6:13] + calendar.month_name[1:6]
caliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)
caliop_monthly_tph = np.nanmean(caliop_tph, axis=1)
params = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,
'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,
'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,
'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':
5, 'ytick.minor.visible': True, 'lines.linewidth': 4}
plt.rcParams.update(params)
fig = plt.figure(figsize=(37, 38))
gs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])
fig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,
fontweight='bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=
'vertical', fontsize=35, fontweight='bold')
col_map = mpl_cm.get_cmap('plasma')
lvs = np.linspace(0, 1.2, 13)
norm = colors.BoundaryNorm(lvs, col_map.N)
i = 1
for n in range(6):
ax1 = fig.add_subplot(gs[n, 0])
ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +
1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax1.set_xlim([25, 85])
ax1.set_ylim([5, 20])
ax1.grid(which='minor', axis='y', alpha=0.2)
ax1.grid(which='minor', axis='x', alpha=0.2)
ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',
fontsize=25)
ax2 = fig.add_subplot(gs[n, 1])
ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,
:, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'
)
ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')
ax2.set_xlim([25, 85])
ax2.set_ylim([5, 20])
ax2.grid(which='minor', axis='y', alpha=0.2)
ax2.grid(which='minor', axis='x', alpha=0.2)
ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)
ax3 = fig.add_subplot(gs[n, 2])
cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,
:, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')
ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')
ax3.set_xlim([25, 85])
ax3.set_ylim([5, 20])
ax3.grid(which='minor', axis='y', alpha=0.2)
ax3.grid(which='minor', axis='x', alpha=0.2)
ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',
fontsize=25)
cax = fig.add_subplot(gs[:, -1])
plt.colorbar(cb, cax=cax, orientation='vertical', label=
'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')
i = i + 4
plt.savefig('Figure10.png', dpi=300)
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:08:06 2023

@author: Alice Wells

Plotting script for Figure 10 in Wells et al., 2023

Aerosol extinction coefficient vertical profile averaged longitudinally.
Averaged monthly CALIOP (centre) aerosol extinction coefficient vertical
profiles (night retrievals only) with monthly average tropopause height
(solid black). UKESM1 SO2 only (left) and SO2+ash (right) simulations with
imposed CALIOP minimum retrieval limits and mask.
"""
# =============================================================================
# Import functions
# =============================================================================

import numpy as np
import matplotlib.pyplot as plt
import calendar
import matplotlib.colors as colors
import matplotlib.cm as mpl_cm

# =============================================================================
# Load data
# =============================================================================

# CALIOP observations
caliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy')
# CALIOP tropopause height
caliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')
# Model SO2+ash with CALIOP limits imposed
so2_ash = np.load('SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
# Model SO2only with CALIOP limits imposed
so2_only = np.load('SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')
# Model altitude profile
model_alts = np.load('Model_altitude.npy')
model_alts[0] = 0
# Model tropopause height
model_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')

# =============================================================================
# Create the caliop model mask
# =============================================================================

# Find model points only where calipso data exists: the mask is 1 where the
# CALIOP field (averaged over its altitude/time axes) has data, NaN elsewhere.
caliop_mask = np.nanmean(caliop, axis = (1,2))
mask = np.ones( (181, 12) )
mask[np.isnan(caliop_mask)] = np.nan

# Apply the (latitude, month) mask to every model altitude level.
so2_ash_masked = np.zeros( (181, 85, 12) )
so2_only_masked = np.zeros( (181, 85, 12) )

for i in range(85):
    so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask
    so2_only_masked[:, i, :] = so2_only[:, i, :] * mask

masked_tph = model_tph * mask

# =============================================================================
# Define altitude profile
# =============================================================================

# CALIOP altitude grid: finer spacing below ~20.2 km, coarser above;
# converted from metres to kilometres for plotting.
alts1 = np.linspace(-500, 20200, 346)
alts2 = np.linspace(20380, 29740, 53)
caliop_alts = np.hstack( (alts1, alts2) )/1000

#Define latitude coordinates
latitude = range(-90, 91)

# Month names for the panel titles, starting from June and wrapping
# into the following year.
months = calendar.month_name[6:13] + calendar.month_name[1:6]

# Collapse the daily CALIOP fields to monthly means.
caliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis = 2)
caliop_monthly_tph = np.nanmean(caliop_tph, axis = 1)

# =============================================================================
# Plotting
# =============================================================================

params = {'legend.fontsize': 25,
          'axes.labelsize': 30,
          'axes.titlesize':35,
          'axes.linewidth':3,
          'axes.grid': True,
          'xtick.labelsize':25,
          'ytick.labelsize':25,
          'xtick.major.size': 8,
          'xtick.minor.size': 5,
          'xtick.minor.visible':True,
          'ytick.major.size':8,
          'ytick.minor.size':5,
          'ytick.minor.visible':True,
          'lines.linewidth': 4}

plt.rcParams.update(params)

# 6 month rows x 3 data columns, plus a narrow 4th column for the colourbar.
fig = plt.figure(figsize = (37, 38))
gs = fig.add_gridspec(6, 4, width_ratios = [25, 25, 25, 5])

fig.text(0.5, 0.08, 'Latitude', ha = 'center', va = 'center', fontsize = 35, fontweight = 'bold')
fig.text(0.08, 0.5, 'Altitude [km]', ha = 'center', va = 'center', rotation = 'vertical', fontsize = 35, fontweight = 'bold')

# NOTE(review): mpl_cm.get_cmap is deprecated since matplotlib 3.7;
# switch to matplotlib.colormaps['plasma'] when upgrading.
col_map = mpl_cm.get_cmap('plasma')
lvs = np.linspace(0, 1.2, 13)
norm = colors.BoundaryNorm(lvs, col_map.N)

for n in range(6):

    # Left column: SO2-only simulation (scaled x100 to 10^-2 km^-1 units).
    ax1 = fig.add_subplot(gs[n, 0])
    ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
    ax1.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
    ax1.set_xlim([25, 85])
    ax1.set_ylim([5, 20])
    ax1.grid(which = 'minor', axis = 'y', alpha = 0.2)
    ax1.grid(which = 'minor', axis = 'x', alpha = 0.2)
    ax1.set_title('UKESM1 SO2only ' + months[n+1], fontweight = 'bold', fontsize = 25)

    # Centre column: CALIOP observations (scaled x1e5 to the same units).
    ax2 = fig.add_subplot(gs[n, 1])
    ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:, :, n+1]*100000), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
    ax2.plot(latitude, caliop_monthly_tph[:, n+1], linewidth = 4, color = 'k')
    ax2.set_xlim([25, 85])
    ax2.set_ylim([5, 20])
    ax2.grid(which = 'minor', axis = 'y', alpha = 0.2)
    ax2.grid(which = 'minor', axis = 'x', alpha = 0.2)
    ax2.set_title('CALIOP ' + months[n+1], fontweight = 'bold', fontsize = 25)

    # Right column: SO2+ash simulation; keep the contour handle for the bar.
    ax3 = fig.add_subplot(gs[n, 2])
    cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')
    ax3.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')
    ax3.set_xlim([25, 85])
    ax3.set_ylim([5, 20])
    ax3.grid(which = 'minor', axis = 'y', alpha = 0.2)
    ax3.grid(which = 'minor', axis = 'x', alpha = 0.2)
    ax3.set_title('UKESM1 SO2+ash ' + months[n+1], fontweight = 'bold', fontsize = 25)

    # Shared colourbar in the last gridspec column.
    cax = fig.add_subplot(gs[:, -1])
    plt.colorbar(cb, cax=cax, orientation = 'vertical', label = 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')

plt.savefig('Figure10.png', dpi = 300)
plt.show()
|
flexible
|
{
"blob_id": "e77c855ba87bc36ab09b0a3eca5c1b7123535794",
"index": 2802,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\n<mask token>\nplt.rcParams.update(params)\n<mask token>\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=\n 'vertical', fontsize=35, fontweight='bold')\n<mask token>\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation='vertical', label=\n 'Aerosol 
extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-3": "<mask token>\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'\n )\ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\nso2_ash = np.load(\n 'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nso2_only = np.load(\n 'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\ncaliop_mask = np.nanmean(caliop, axis=(1, 2))\nmask = np.ones((181, 12))\nmask[np.isnan(caliop_mask)] = np.nan\nso2_ash_masked = np.zeros((181, 85, 12))\nso2_only_masked = np.zeros((181, 85, 12))\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\nmasked_tph = model_tph * mask\nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack((alts1, alts2)) / 1000\nlatitude = range(-90, 91)\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis=1)\nparams = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,\n 'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,\n 'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,\n 'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':\n 5, 'ytick.minor.visible': True, 'lines.linewidth': 4}\nplt.rcParams.update(params)\nfig = plt.figure(figsize=(37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', rotation=\n 'vertical', fontsize=35, fontweight='bold')\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 13)\nnorm = 
colors.BoundaryNorm(lvs, col_map.N)\ni = 1\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation='vertical', label=\n 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport calendar\nimport matplotlib.colors as colors\nimport matplotlib.cm as mpl_cm\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy'\n )\ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\nso2_ash = np.load(\n 'SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nso2_only = np.load(\n 'SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy'\n )\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\ncaliop_mask = np.nanmean(caliop, axis=(1, 2))\nmask = np.ones((181, 12))\nmask[np.isnan(caliop_mask)] = np.nan\nso2_ash_masked = np.zeros((181, 85, 12))\nso2_only_masked = np.zeros((181, 85, 12))\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\nmasked_tph = model_tph * mask\nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack((alts1, alts2)) / 1000\nlatitude = range(-90, 91)\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis=2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis=1)\nparams = {'legend.fontsize': 25, 'axes.labelsize': 30, 'axes.titlesize': 35,\n 'axes.linewidth': 3, 'axes.grid': True, 'xtick.labelsize': 25,\n 'ytick.labelsize': 25, 'xtick.major.size': 8, 'xtick.minor.size': 5,\n 'xtick.minor.visible': True, 'ytick.major.size': 8, 'ytick.minor.size':\n 5, 'ytick.minor.visible': True, 'lines.linewidth': 4}\nplt.rcParams.update(params)\nfig = plt.figure(figsize=(37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios=[25, 25, 25, 5])\nfig.text(0.5, 0.08, 'Latitude', ha='center', va='center', fontsize=35,\n fontweight='bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha='center', va='center', 
rotation=\n 'vertical', fontsize=35, fontweight='bold')\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 13)\nnorm = colors.BoundaryNorm(lvs, col_map.N)\ni = 1\nfor n in range(6):\n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n +\n 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax1.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which='minor', axis='y', alpha=0.2)\n ax1.grid(which='minor', axis='x', alpha=0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n + 1], fontweight='bold',\n fontsize=25)\n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:,\n :, n + 1] * 100000), cmap=col_map, levels=lvs, norm=norm, extend='both'\n )\n ax2.plot(latitude, caliop_monthly_tph[:, n + 1], linewidth=4, color='k')\n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which='minor', axis='y', alpha=0.2)\n ax2.grid(which='minor', axis='x', alpha=0.2)\n ax2.set_title('CALIOP ' + months[n + 1], fontweight='bold', fontsize=25)\n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:,\n :, n + 1] * 100), cmap=col_map, levels=lvs, norm=norm, extend='both')\n ax3.plot(latitude, masked_tph[:, n + 1] / 1000, linewidth=4, color='k')\n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which='minor', axis='y', alpha=0.2)\n ax3.grid(which='minor', axis='x', alpha=0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n + 1], fontweight='bold',\n fontsize=25)\n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation='vertical', label=\n 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n i = i + 4\nplt.savefig('Figure10.png', dpi=300)\nplt.show()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 1 17:08:06 2023\n\n@author: Alice Wells\n\nPlotting script for Figure 10 in Wells et al., 2023\n\nAerosol extinction coefficient vertical profile averaged longitudinally. \nAveraged monthly CALIOP (centre) aerosol extinction coefficient vertical \nprofiles (night retrievals only) with monthly average tropopause height \n(solid black). UKESM1 SO2 only (left) and SO2+ash (right) simulations with \nimposed CALIOP minimum retrieval limits and mask. \n\n\"\"\"\n# =============================================================================\n# Import functions\n# =============================================================================\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport calendar\nimport matplotlib.colors as colors\nimport matplotlib.cm as mpl_cm\n\n# =============================================================================\n# Load data\n# =============================================================================\n\n#CALIOP observations\ncaliop = np.load('caliop_perturbation_daily_zonal_average_extinction_532nm.npy')\n#CALIOP tropopause height \ncaliop_tph = np.load('calipso_daily_zonal_average_tropopause_height.npy')\n#Model SO2+ash with CALIOP limits imposed\nso2_ash = np.load('SO2_ash_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')\n#Model SO2only with CALIOP limits imposed\nso2_only = np.load('SO2_only_perturbation_monthly_zonal_average_extinction_caliop_limits_532nm_1x1deg.npy')\n#Model altitude profile\nmodel_alts = np.load('Model_altitude.npy')\nmodel_alts[0] = 0\n#Model tropopause height\nmodel_tph = np.load('Model_monthly_zonal_average_tropopause_height.npy')\n\n# =============================================================================\n# Create the caliop model mask\n# =============================================================================\n\n#Find model points only where calipso data 
exists\ncaliop_mask = np.nanmean(caliop, axis = (1,2))\nmask = np.ones( (181, 12) )\nmask[np.isnan(caliop_mask)] = np.nan\n\n#Mask the model data\nso2_ash_masked = np.zeros( (181, 85, 12) )\nso2_only_masked = np.zeros( (181, 85, 12) )\nfor i in range(85):\n so2_ash_masked[:, i, :] = so2_ash[:, i, :] * mask\n so2_only_masked[:, i, :] = so2_only[:, i, :] * mask\n\nmasked_tph = model_tph * mask \n\n# =============================================================================\n# Define altitude profile\n# =============================================================================\n \nalts1 = np.linspace(-500, 20200, 346)\nalts2 = np.linspace(20380, 29740, 53)\ncaliop_alts = np.hstack( (alts1, alts2) )/1000\n \n#Define latitude coordinates\nlatitude = range(-90, 91)\n#Create months for plotting dates\nmonths = calendar.month_name[6:13] + calendar.month_name[1:6]\n\n#Calculate monthly average for CALIOP\ncaliop_monthly_mean = np.nanmean(caliop[:, :, :, :], axis = 2)\ncaliop_monthly_tph = np.nanmean(caliop_tph, axis = 1)\n\n# =============================================================================\n# Plotting\n# =============================================================================\n\nparams = {'legend.fontsize': 25,\n 'axes.labelsize': 30,\n 'axes.titlesize':35,\n 'axes.linewidth':3,\n 'axes.grid': True,\n 'xtick.labelsize':25,\n 'ytick.labelsize':25,\n 'xtick.major.size': 8,\n 'xtick.minor.size': 5,\n 'xtick.minor.visible':True,\n 'ytick.major.size':8,\n 'ytick.minor.size':5,\n 'ytick.minor.visible':True,\n 'lines.linewidth': 4} \n\nplt.rcParams.update(params)\n\nfig = plt.figure(figsize = (37, 38))\ngs = fig.add_gridspec(6, 4, width_ratios = [25, 25, 25, 5])\n\nfig.text(0.5, 0.08, 'Latitude', ha = 'center', va = 'center', fontsize = 35, fontweight = 'bold')\nfig.text(0.08, 0.5, 'Altitude [km]', ha = 'center', va = 'center', rotation = 'vertical', fontsize = 35, fontweight = 'bold')\n\ncol_map = mpl_cm.get_cmap('plasma')\nlvs = np.linspace(0, 1.2, 
13)\nnorm = colors.BoundaryNorm(lvs, col_map.N)\n\ni = 1\n\nfor n in range(6):\n \n ax1 = fig.add_subplot(gs[n, 0])\n ax1.contourf(latitude, model_alts, np.transpose(so2_only_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax1.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')\n\n ax1.set_xlim([25, 85])\n ax1.set_ylim([5, 20])\n ax1.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax1.grid(which = 'minor', axis = 'x', alpha = 0.2)\n ax1.set_title('UKESM1 SO2only ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n ax2 = fig.add_subplot(gs[n, 1])\n ax2.contourf(latitude, caliop_alts, np.transpose(caliop_monthly_mean[:, :, n+1]*100000), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax2.plot(latitude, caliop_monthly_tph[:, n+1], linewidth = 4, color = 'k') \n \n ax2.set_xlim([25, 85])\n ax2.set_ylim([5, 20])\n ax2.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax2.grid(which = 'minor', axis = 'x', alpha = 0.2) \n ax2.set_title('CALIOP ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n ax3 = fig.add_subplot(gs[n, 2])\n cb = ax3.contourf(latitude, model_alts, np.transpose(so2_ash_masked[:, :, n+1]*100), cmap = col_map, levels = lvs, norm = norm, extend = 'both')\n ax3.plot(latitude, masked_tph[:, n+1]/1000, linewidth = 4, color = 'k')\n \n ax3.set_xlim([25, 85])\n ax3.set_ylim([5, 20])\n ax3.grid(which = 'minor', axis = 'y', alpha = 0.2)\n ax3.grid(which = 'minor', axis = 'x', alpha = 0.2)\n ax3.set_title('UKESM1 SO2+ash ' + months[n+1], fontweight = 'bold', fontsize = 25)\n \n cax = fig.add_subplot(gs[:, -1])\n plt.colorbar(cb, cax=cax, orientation = 'vertical', label = 'Aerosol extinction coefficient [$x10^{-2}$ km$^{-1}$]')\n \n i = i + 4\n\nplt.savefig('Figure10.png', dpi = 300)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
# Print the lab header: author, lab number, variant, and task description (Ukrainian).
print("Кулик Валерія Максимівна\n "
      "Лабораторна робота №2 \n "
      "Варіант 10 \n "
      "Завдання №1. Обчислити формулу")
def int_input(text):
    """Keep prompting until the user types a valid non-negative integer."""
    while True:
        answer = input(text)
        if re.match('^[0-9]{1,}$', answer):
            return int(answer)
        print("Помилка")
def float_input(text):
    """Keep prompting until the user types a valid decimal number (optional sign)."""
    while True:
        answer = input(text)
        if re.match('^[-+]{0,1}[0-9]{1,}(\.[0-9]{1,}){0,1}$', answer):
            return float(answer)
        print("Помилка")
# Read the number of terms and the base value from the user.
n = int_input('Введіть n : ')
x = float_input('Введіть x : ')
# Accumulate the product for i = 1 .. n-1.
res = 1
for i in range(1, n):
    # NOTE(review): `(x + i) / i*i` parses as ((x + i) / i) * i, which
    # simplifies to x + i. If the formula intends (x + i) / i^2, parentheses
    # are missing: (x + i) / (i * i) — confirm against the assignment sheet.
    res = res * ((x + i) / i*i)
print('Результат : ', res)
|
normal
|
{
"blob_id": "e8e52cd0a0685e827ecbc6272657de5158fa0d94",
"index": 7037,
"step-1": "import re\n\nprint(\"Кулик Валерія Максимівна\\n \"\n \"Лабораторна робота №2 \\n \"\n \"Варіант 10 \\n \"\n \"Завдання №1. Обчислити формулу\")\n\n\ndef int_input(text):\n while True:\n user_input = input(text)\n if re.match('^[0-9]{1,}$', user_input):\n break\n else:\n print(\"Помилка\")\n continue\n return int(user_input)\n\n\ndef float_input(text):\n while True:\n user_input = input(text)\n if re.match('^[-+]{0,1}[0-9]{1,}(\\.[0-9]{1,}){0,1}$', user_input):\n break\n else:\n print(\"Помилка\")\n continue\n return float(user_input)\n\n\nn = int_input('Введіть n : ')\nx = float_input('Введіть x : ')\n\nres = 1\nfor i in range(1, n):\n res = res * ((x + i) / i*i)\n\nprint('Результат : ', res)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(a.shape)
print(a)
print('\n')
print(a[0][5])
print(a[1][1])
print(a[1][-6])
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1])
print('\n')
print('even numbers from first row')
print(a[0, 1:8:2])
<|reserved_special_token_0|>
print('new array is ', a)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape)
print(a)
print('\n')
print(a[0][5])
print(a[1][1])
print(a[1][-6])
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1])
print('\n')
print('even numbers from first row')
print(a[0, 1:8:2])
a[1, 2] = 90
print('new array is ', a)
<|reserved_special_token_1|>
import numpy as np
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape)
print(a)
print('\n')
print(a[0][5])
print(a[1][1])
print(a[1][-6])
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1])
print('\n')
print('even numbers from first row')
print(a[0, 1:8:2])
a[1, 2] = 90
print('new array is ', a)
<|reserved_special_token_1|>
# Demo: accessing NumPy array elements by row and column indices.
import numpy as np
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape)  # (rows, columns) -> (2, 7)
print(a)
print('\n')
# Single elements via chained [row][col] indexing.
print(a[0][5])  # 6: row 0, column 5
print(a[1][1])  # 8: row 1, column 1
print(a[1][-6])  # 8 again: column -6 of a 7-column row is column 1
# Whole rows and columns.
print(a[1])  # second row
print(a[0])  # first row
print(a[0, :])  # first row again, explicit slice syntax
print(a[:, 1])  # second column
print('\n')
# Slicing with a step: [start:stop:step] picks every other element of row 0.
print('even numbers from first row')
print(a[0, 1:8:2])
# Arrays are mutable: assign through an index to change one value in place.
a[1, 2] = 90
print('new array is ',a)
|
flexible
|
{
"blob_id": "8cc97ebe0ff7617eaf31919d40fa6c312d7b6f94",
"index": 8814,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n<mask token>\nprint('new array is ', a)\n",
"step-3": "<mask token>\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-4": "import numpy as np\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape)\nprint(a)\nprint('\\n')\nprint(a[0][5])\nprint(a[1][1])\nprint(a[1][-6])\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1])\nprint('\\n')\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\na[1, 2] = 90\nprint('new array is ', a)\n",
"step-5": "# accessing array elements rows/columns\nimport numpy as np\n\na = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])\nprint(a.shape) # array shape\nprint(a)\nprint('\\n')\n\n# specific array element [r,c]\n# item 6\nprint(a[0][5])\n\n# item 8\nprint(a[1][1]) # or\nprint(a[1][-6])\n\n# get a specific row/specific column\nprint(a[1])\nprint(a[0])\nprint(a[0, :])\nprint(a[:, 1]) # prints second column\nprint('\\n')\n\n# get only the even numbers from first row [start_index:end_index:step]\nprint('even numbers from first row')\nprint(a[0, 1:8:2])\n\n# change certain value of array\na[1, 2] = 90\nprint('new array is ',a)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# DATA TYPES (DATA TİPLERİ)
# STRİNGS (KARAKTER DİZİNLERİ)
# Bir karakter dizinini tanımlamak için tırnaklar kullanılır. birkaç satır ka-
# rakter dizini yazıyorsak 3 tırnak kullanılır:
print("""Üç tırnaklı
karakter
dizinine
örnek""")
üç tırnaklı
karakter
dizinine
örnek
print('Tek tırnak: Tek satırlık stringlerde uygulanır.')
Tek tırnak: Tek satırlık stringlerde uygulanır.
print("Çift Tırnak: Yine tek satırlık cümlelerde kullanılır.")
Çift Tırnak: Yine tek satırlık cümlelerde kullanılır.
# Farklı tırnakların olmasının nedeni, tek tırnakla ayrılan özel isimlerin ayrım
# işaretinin çıktıyı string olarak kabul etmesini önlemek:
print("Türkiye'nin başkenti Ankara'dır.")
Türkiye'nin başkenti Ankara'dır.
# Yukarıdaki gibi bir çıktı almak için çift tırnak ("") kullandım. Çünkü tek
# tırnak kullansam şöyle hatalı bir print kodu oluşurdu:
print('Türkiye'nin başkenti Ankara'dır.') # Python, dğerlerin hatalı olduğunu
# ifade eden renklendirme yapardı.
# Çift tırnakla başlayıp ayraçları tek tırnak yaparsak Python, çift tırnakla
# başladığından dolayı aradaki tek tırnakları görmez ve onu da string içeriği
# olarak kabul eder. Aynı şekilde üç tırnakla başlasaydım bu sefer de çift tır-
# nakları görmeyip onları da string içeriği olarak kabul ederdi.
|
normal
|
{
"blob_id": "61f2fbed184ff6f842ba9527456da453844f8dc6",
"index": 1362,
"step-1": "# DATA TYPES (DATA TİPLERİ)\r\n\r\n# STRİNGS (KARAKTER DİZİNLERİ)\r\n\r\n# Bir karakter dizinini tanımlamak için tırnaklar kullanılır. birkaç satır ka-\r\n# rakter dizini yazıyorsak 3 tırnak kullanılır:\r\nprint(\"\"\"Üç tırnaklı\r\nkarakter\r\ndizinine\r\nörnek\"\"\")\r\nüç tırnaklı\r\nkarakter\r\ndizinine\r\nörnek\r\n\r\nprint('Tek tırnak: Tek satırlık stringlerde uygulanır.')\r\nTek tırnak: Tek satırlık stringlerde uygulanır.\r\nprint(\"Çift Tırnak: Yine tek satırlık cümlelerde kullanılır.\")\r\nÇift Tırnak: Yine tek satırlık cümlelerde kullanılır.\r\n\r\n# Farklı tırnakların olmasının nedeni, tek tırnakla ayrılan özel isimlerin ayrım\r\n# işaretinin çıktıyı string olarak kabul etmesini önlemek:\r\nprint(\"Türkiye'nin başkenti Ankara'dır.\")\r\nTürkiye'nin başkenti Ankara'dır.\r\n# Yukarıdaki gibi bir çıktı almak için çift tırnak (\"\") kullandım. Çünkü tek\r\n# tırnak kullansam şöyle hatalı bir print kodu oluşurdu:\r\nprint('Türkiye'nin başkenti Ankara'dır.') # Python, dğerlerin hatalı olduğunu\r\n# ifade eden renklendirme yapardı.\r\n# Çift tırnakla başlayıp ayraçları tek tırnak yaparsak Python, çift tırnakla\r\n# başladığından dolayı aradaki tek tırnakları görmez ve onu da string içeriği\r\n# olarak kabul eder. Aynı şekilde üç tırnakla başlasaydım bu sefer de çift tır-\r\n# nakları görmeyip onları da string içeriği olarak kabul ederdi.\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# #----------------------------------------#
# 3.4
#
# Question:
# Write a program which can map() to make a list whose elements are square of elements in [1,2,3,4,5,6,7,8,9,10].
#
|
normal
|
{
"blob_id": "8c71bc5d53bf5c4cb20784659eddf8a97efb86ef",
"index": 8336,
"step-1": "#\t#----------------------------------------#\n#\t3.4\n#\t\n#\tQuestion:\n#\tWrite a program which can map() to make a list whose elements are square of elements in [1,2,3,4,5,6,7,8,9,10].\n#\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
def get_file_list(save_path, wld='*.105*.hdf'):
"""
Args:
save_path: path to folder where raw MODIS files are
wld: common wildcard in all of the raw MODIS files
Returns:
list of files to analyze in the raw folder
"""
return glob.glob(os.path.join(save_path, wld))
<|reserved_special_token_0|>
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
"""
Given an input tar archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: filelist -- the list of all extract files
"""
import tarfile
with tarfile.open(filepath, compression) as tfile:
filelist = tfile.getnames()
tfile.extractall(path=outfoldername)
if deletesource:
try:
os.remove(filepath)
except:
raise Exception('Could not delete tar archive {0}.'.format(
filepath))
return filelist
def ungz(filepath, compression='rb', deletesource=False):
"""
Given an input gz archive filepath, extracts the files.
Required: filepath -- the path to the tar archive
Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
Output: filelist -- the list of all extract files
"""
import gzip
with gzip.open(filepath, compression) as f:
outF = open(filepath[:-3], 'wb')
outF.write(f.read())
f.close()
outF.close()
if deletesource:
try:
os.remove(filepath)
except:
raise Exception('Could not delete gz archive {0}.'.format(filepath)
)
return filepath[:-3]
<|reserved_special_token_0|>
def get_snodas(out_dir, months='', years=''):
"""Downloads daily SNODAS data from ftp. This is slow.
:param out_dir: directory to store downloaded SNODAS zip files
:param months: months desired for download
:param years: years desired for download
:return: saved zip files in out_dir
.. note:
Use polaris: http://nsidc.org/data/polaris/
"""
import ftplib
if months == '':
months = [1, 12]
if years == '':
years = [2000, 2015]
monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months
[0], months[1] + 1)]
yrs = [str(i) for i in range(years[0], years[1] + 1)]
for yr in yrs:
for m in mons:
ftp_addr = 'sidads.colorado.edu'
ftp = ftplib.FTP(ftp_addr)
ftp.login()
dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'
ftp.cwd(dir_path)
files = ftp.nlst()
for f in files:
if len(f) > 4:
save_file = open(out_dir + '/' + f, 'wb')
ftp.retrbinary('RETR ' + f, save_file.write)
save_file.close()
print(f)
ftp.close()
def rename_polaris_snodas(path):
prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',
'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',
'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
for filename in os.listdir(path):
if filename.startswith('us_ssmv'):
code = prodcode[filename[0:17]]
yrsrt = filename.find('TNATS') + 5
yr = filename[yrsrt:yrsrt + 4]
mo = filename[yrsrt + 4:yrsrt + 6]
dy = filename[yrsrt + 6:yrsrt + 8]
try:
os.rename(os.path.join(path, filename), os.path.join(path,
code + yr + mo + dy + filename[-4:]))
except:
pass
<|reserved_special_token_0|>
def totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003,
2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=
'H:/GIS/SNODAS/SNODASproj.gdb/'):
"""Summarizes daily raster data into monthly data.
INPUT
-----
code = string with four letters represting data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':
'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
for m in range(monthRange[0], monthRange[1] + 1):
g[code + '0000' + str(m).zfill(2)] = []
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1)
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]
) == m:
g[code + '0000' + str(m).zfill(2)].append(rast)
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],
statistics_type=statistics, ignore_nodata='DATA')
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_file_list(save_path, wld='*.105*.hdf'):
    """List the raw MODIS files under a folder.

    Args:
        save_path: directory that holds the raw MODIS files
        wld: glob wildcard common to all of the raw MODIS files

    Returns:
        list of paths of files to analyze in the raw folder
    """
    pattern = os.path.join(save_path, wld)
    return glob.glob(pattern)
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
    """Iterates through MODIS files in a folder reprojecting them.

    Takes the crazy MODIS sinusoidal projection to a user defined projection.

    Args:
        files: list of file paths of MODIS hdf files; created using
            files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
        save_path: folder to store the reprojected files
        data_type: type of MODIS16 data being reprojected; options are
            'ET', 'PET', 'LE', and 'PLE'
        eight_day: time span of modis file; Bool where default is true
            (input 8-day rasters)
        proj: projection of output data by epsg number; default (102003)
            is Albers Equal Area

    Returns:
        Reprojected MODIS files

    ..notes:
        The EPSG code for NAD83 Zone 12 is 26912.
        The EPSG code for Albers Equal Area is 102003
        http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
        https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
        https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<
        https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
    """
    # Local import keeps the heavy GDAL-backed dependency optional.
    import pymodis
    datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
    # Subset flags select exactly one of the four science datasets in each HDF.
    matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],
        'PLE': [0, 0, 0, 1]}
    if not os.path.exists(save_path + datadir[data_type]):
        os.makedirs(save_path + datadir[data_type])
        print('created {:}'.format(save_path + datadir[data_type]))
    for f in files:
        # NOTE(review): parsing assumes Windows-style '\\' separators and the
        # MODIS tile naming scheme (<prod>.A<year><doy>.h<h>v<v>...) — confirm
        # if paths may come from a POSIX system.
        year = f.split('\\')[1].split('.')[1][1:5]
        v = f.split('\\')[1].split('.')[2][-2:]
        h = f.split('\\')[1].split('.')[2][1:3]
        if eight_day:
            doy = f.split('\\')[1].split('.')[1][-3:]
            fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
        else:
            month = f.split('\\')[1].split('.')[1][-2:]
            fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
        # Output prefix shared by both branches (was duplicated in each).
        pref = os.path.join(save_path + datadir[data_type] + fname)
        convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(
            hdfname=f, prefix=pref, subset=matrdir[data_type], res=1000,
            epsg=proj)
        try:
            convertsingle.run()
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts the
            # batch; a single bad tile should not abort the whole run.
            print(fname + ' failed!')
def clip_and_fix(path, outpath, data_type, area=''):
    """Clips raster to Utah's Watersheds and makes exception values null.

    Args:
        path: folder of the reprojected MODIS files
        outpath: ESRI gdb to store the clipped files
        data_type: type of MODIS16 data being reprojected; options are
            'ET', 'PET', 'LE', and 'PLE'
        area: path to polygon used to clip tiles; defaults to the Utah
            watershed boundary feature class
    """
    arcpy.CheckOutExtension('Spatial')
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    arcpy.env.mask = area
    # NOTE(review): 'Spatial' was already checked out above; this second call
    # looks redundant — confirm before removing.
    arcpy.CheckOutExtension('spatial')
    for rast in arcpy.ListRasters():
        # Values above 32700 are assumed to be MOD16 fill/exception codes and
        # are nulled out — TODO confirm the threshold against the product docs.
        calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
        # Rebuild output name from the tile raster name (year, period, h/v
        # indices); slice positions assume the reproject_modis naming scheme.
        calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[
            10:11] + 'v' + rast[13:14])
        print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:
            11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
    """Mosaics (merges) different MODIS cells into one layer.

    Args:
        path: workspace holding the clipped per-tile rasters
        data_type: MODIS16 variable prefix ('ET', 'PET', 'LE', or 'PLE')
        monthRange: [first, last] months to process; defaults to [1, 12]
        yearRange: [first, last] years to process; defaults to [2000, 2015]
        outpath: workspace for the mosaicked output; defaults to ``path``

    Output rasters are named ``<data_type><year><month>c`` (trailing 'c'
    marks the combined mosaic).
    """
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    if outpath == '':
        outpath = path
    arcpy.env.workspace = path
    outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    for y in range(yearRange[0], yearRange[-1] + 1):
        for m in range(monthRange[0], monthRange[-1] + 1):
            nm = data_type + str(y) + str(m).zfill(2)
            # Collect every tile raster for this year/month (wildcard match).
            rlist = []
            for rast in arcpy.ListRasters(nm + '*'):
                rlist.append(rast)
            try:
                arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',
                    outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')
                print(path + nm + 'c')
            # NOTE(review): bare except hides real errors — consider narrowing
            # to arcpy.ExecuteError.
            except:
                print(nm + ' failed!')
                pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET',
                monthRange=(1, 12), yearRange=(2000, 2014)):
    """Rescale mosaicked MODIS rasters by a constant factor.

    Divides each monthly mosaic named ``<data_type><year><month>c`` by
    ``scaleby`` and saves the result as ``<out_path><data_type><year><month>``.

    :param path: directory to unconverted modis tiles
    :param out_path: directory to put output in
    :param scaleby: scaling factor for MODIS data; default converts to meters/month
    :param data_type: type of MODIS16 data being scaled; used for file name;
        options are 'ET', 'PET', 'LE', and 'PLE'
    :param monthRange: (first, last) months to process
    :param yearRange: (first, last) years to process
    :return: None

    .. note::
       ``path`` is never referenced in the body: input rasters are resolved
       against the current ``arcpy.env.workspace`` — confirm the caller sets
       it before calling.
    """
    arcpy.CheckOutExtension('spatial')
    # Defaults are tuples (the original used mutable list defaults, a known
    # shared-state smell, though they were never mutated here).
    for y in range(yearRange[0], yearRange[-1] + 1):
        for m in range(monthRange[0], monthRange[-1] + 1):
            nm = data_type + str(y) + str(m).zfill(2)
            calc = Divide(nm + 'c', scaleby)
            calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
    """Extract all members of a tar archive.

    Args:
        filepath: path to the tar archive
        outfoldername: output directory for the extracted files; DEFAULT is
            the current directory
        compression: mode passed to ``tarfile.open``; DEFAULT is 'r'; use
            'r:gz' for gzipped archives
        deletesource: when True, remove the archive after extraction;
            DEFAULT is False

    Returns:
        filelist -- the list of all extracted member names

    Raises:
        Exception: if ``deletesource`` is True and the archive cannot be
            removed
    """
    import tarfile
    with tarfile.open(filepath, compression) as tfile:
        filelist = tfile.getnames()
        tfile.extractall(path=outfoldername)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError as err:
            # Narrowed from a bare except: only file-system failures are
            # expected here; chain the cause for easier debugging.
            raise Exception('Could not delete tar archive {0}.'.format(
                filepath)) from err
    return filelist
def ungz(filepath, compression='rb', deletesource=False):
    """Decompress a gzip archive next to itself.

    Args:
        filepath: path to the ``.gz`` archive
        compression: mode passed to ``gzip.open``; DEFAULT is 'rb'
        deletesource: when True, remove the archive after decompression;
            DEFAULT is False

    Returns:
        path of the decompressed file (``filepath`` without its '.gz' suffix)

    Raises:
        Exception: if ``deletesource`` is True and the archive cannot be
            removed
    """
    import gzip
    outpath = filepath[:-3]
    # Context managers guarantee both handles are closed even if the
    # read/write fails (the original leaked the output handle on error).
    with gzip.open(filepath, compression) as f, open(outpath, 'wb') as outF:
        outF.write(f.read())
    if deletesource:
        try:
            os.remove(filepath)
        except OSError as err:
            # Narrowed from a bare except; chain the cause for debugging.
            raise Exception('Could not delete gz archive {0}.'.format(filepath)
                ) from err
    return outpath
def replace_hdr_file(hdrfile):
    """Write the correct .hdr metadata for a SNODAS .bil raster.

    Overwrites (or creates) *hdrfile* with the fixed national-grid header
    that Arc needs to read the masked SNODAS rasters.

    Args:
        hdrfile: filepath for the .hdr file to replace/create
    """
    hdr_text = """byteorder M
layout bil
nbands 1
nbits 16
ncols 6935
nrows 3351
ulxmap -124.729583333331703
ulymap 52.871249516804028
xdim 0.00833333333
ydim 0.00833333333
"""
    with open(hdrfile, 'w') as hdr_out:
        hdr_out.write(hdr_text)
def get_snodas(out_dir, months='', years=''):
    """Downloads daily SNODAS data from ftp. This is slow.

    :param out_dir: directory to store downloaded SNODAS zip files
    :param months: [first, last] months desired; defaults to [1, 12]
    :param years: [first, last] years desired; defaults to [2000, 2015]
    :return: saved zip files in out_dir

    .. note:
        Use polaris: http://nsidc.org/data/polaris/
    """
    import ftplib
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
        'Sep', 'Oct', 'Nov', 'Dec']
    # FTP folder names look like '01_Jan' .. '12_Dec'.
    mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months
        [0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for yr in yrs:
        for m in mons:
            # NOTE(review): a fresh connection is opened for every month —
            # presumably to dodge idle timeouts on long transfers; confirm.
            ftp_addr = 'sidads.colorado.edu'
            ftp = ftplib.FTP(ftp_addr)
            ftp.login()
            dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'
            ftp.cwd(dir_path)
            files = ftp.nlst()
            for f in files:
                # NOTE(review): presumably filters out '.'/'..'-style listing
                # entries; real archive names are longer — confirm.
                if len(f) > 4:
                    save_file = open(out_dir + '/' + f, 'wb')
                    ftp.retrbinary('RETR ' + f, save_file.write)
                    save_file.close()
                    print(f)
            ftp.close()
def rename_polaris_snodas(path):
    """Rename raw SNODAS files to short ``<CODE><yyyymmdd>`` names.

    Files such as ``us_ssmv11036tS__T0001TTNATS2004101505HP001.dat`` become
    ``SNOD20041015.dat``: the 17-character product prefix is mapped to a
    four-letter code and the date is read from just after the 'TNATS' tag.

    Args:
        path: directory containing the extracted SNODAS files
    """
    # Map the 17-character SNODAS product prefix to a short variable code.
    prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',
        'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',
        'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
        'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
    for filename in os.listdir(path):
        if not filename.startswith('us_ssmv'):
            continue
        code = prodcode.get(filename[0:17])
        if code is None:
            # Unknown product prefix: skip instead of crashing (the original
            # raised KeyError here for unexpected prefixes).
            continue
        yrsrt = filename.find('TNATS') + 5
        yr = filename[yrsrt:yrsrt + 4]
        mo = filename[yrsrt + 4:yrsrt + 6]
        dy = filename[yrsrt + 6:yrsrt + 8]
        try:
            os.rename(os.path.join(path, filename), os.path.join(path,
                code + yr + mo + dy + filename[-4:]))
        except OSError:
            # Best-effort: leave files that cannot be renamed (e.g. the
            # target already exists on Windows) untouched.
            pass
<|reserved_special_token_0|>
def totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003,
    2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=
    'H:/GIS/SNODAS/SNODASproj.gdb/'):
    """Summarizes daily raster data into monthly data.

    INPUT
    -----
    code = string with four letters representing data type to summarize (example 'BSSB')
    statistics = how data will be summarized; defaults to monthly averages; options are
        ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
        Most common are 'MEAN','MEDIAN', and 'SUM'
        These are inputs that will be used in the ArcPy CellStatistics function.
        See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
    monthRange = beginning and end months of summary statistics
    yearRange = beginning and end years of summary statistics
    path = location of geodatabase of data to summarize
    outpath = location of geodatabase where output data should be stored

    OUTPUT
    ------
    summary raster(s) stored in outpath
    """
    # NOTE(review): the list defaults are evaluated once and shared across
    # calls; harmless here because they are only read, never mutated.
    g = {}
    # Three-letter suffix appended to each output raster name per statistic.
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
        'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':
        'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    # Bucket daily rasters named <CODE><yyyy><mm>... by month across all
    # years in yearRange; bucket key '<CODE>0000<mm>' marks an all-years set.
    for m in range(monthRange[0], monthRange[1] + 1):
        g[code + '0000' + str(m).zfill(2)] = []
        for rast in arcpy.ListRasters():
            yrrng = range(yearRange[0], yearRange[1] + 1)
            if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]
                ) == m:
                g[code + '0000' + str(m).zfill(2)].append(rast)
            else:
                pass
        # Only summarize months that actually matched at least one raster.
        if len(g[code + '0000' + str(m).zfill(2)]) > 0:
            calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],
                statistics_type=statistics, ignore_nodata='DATA')
            calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
            print(code + '0000' + str(m).zfill(2) + statstype[statistics])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_file_list(save_path, wld='*.105*.hdf'):
    """Collect the raw MODIS files sitting in a folder.

    Args:
        save_path: path to folder where raw MODIS files are
        wld: common wildcard in all of the raw MODIS files

    Returns:
        list of files to analyze in the raw folder
    """
    pattern = os.path.join(save_path, wld)
    return glob.glob(pattern)
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
    """Reproject raw MODIS16 hdf tiles from sinusoidal to a target projection.

    Args:
        files: list of file paths of MODIS hdf files, e.g. from get_file_list()
        save_path: folder to store the reprojected files
        data_type: MODIS16 variable to extract; one of 'ET','PET','LE','PLE'
        eight_day: True for 8-day input rasters, False for monthly
        proj: EPSG/ESRI code of the output projection; default 102003 (Albers)

    Returns:
        Reprojected MODIS files on disk under save_path/<data_type>/

    ..notes:
        The EPSG code for NAD83 Zone 12 is 26912.
        http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
        https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    import pymodis
    # output subdirectory and hdf subset selector for each MOD16 variable
    subdirs = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
    layers = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],
              'PLE': [0, 0, 0, 1]}
    dest = save_path + subdirs[data_type]
    if not os.path.exists(dest):
        os.makedirs(dest)
        print('created {:}'.format(dest))
    for hdf in files:
        # assumes Windows-style paths like <dir>\MOD16A2.Ayyyyddd.hXXvYY.105....hdf
        pieces = hdf.split('\\')[1].split('.')
        year = pieces[1][1:5]
        v = pieces[2][-2:]
        h = pieces[2][1:3]
        if eight_day:
            fname = 'A' + year + 'D' + pieces[1][-3:] + 'h' + h + 'v' + v
        else:
            fname = 'A' + year + 'M' + pieces[1][-2:] + 'h' + h + 'v' + v
        pref = os.path.join(dest + fname)
        converter = pymodis.convertmodis_gdal.convertModisGDAL(
            hdfname=hdf, prefix=pref, subset=layers[data_type], res=1000,
            epsg=proj)
        try:
            converter.run()
        except:
            print(fname + ' failed!')
            pass
def clip_and_fix(path, outpath, data_type, area=''):
    """Clip MODIS rasters to a watershed polygon and null out exception values.

    Args:
        path: folder of the reprojected MODIS files
        outpath: ESRI gdb to store the clipped files
        data_type: type of MODIS16 data being clipped; options are 'ET','PET','LE', and 'PLE'
        area: path to polygon used to clip tiles; defaults to Utah's watersheds
    """
    arcpy.CheckOutExtension('Spatial')
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    arcpy.env.mask = area
    arcpy.CheckOutExtension('spatial')
    for rast in arcpy.ListRasters():
        source = arcpy.Raster(rast)
        # raster values above 32700 are exception codes; set them to NoData
        cleaned = SetNull(source > 32700, source)
        outname = (outpath + data_type + rast[1:5] + rast[6:8] + 'h' +
                   rast[10:11] + 'v' + rast[13:14])
        cleaned.save(outname)
        print(outname)
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
    """Mosaic (merge) the separate MODIS cell rasters into one monthly layer.

    Args:
        path: workspace holding the per-cell rasters
        data_type: MODIS16 variable prefix used in raster names
        monthRange: [first, last] month to mosaic; defaults to [1, 12]
        yearRange: [first, last] year to mosaic; defaults to [2000, 2015]
        outpath: output workspace; defaults to `path`
    """
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    if outpath == '':
        outpath = path
    arcpy.env.workspace = path
    outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    for year in range(yearRange[0], yearRange[-1] + 1):
        for month in range(monthRange[0], monthRange[-1] + 1):
            stem = data_type + str(year) + str(month).zfill(2)
            tiles = [rast for rast in arcpy.ListRasters(stem + '*')]
            try:
                arcpy.MosaicToNewRaster_management(tiles, outpath,
                                                   stem + 'c', outCS,
                                                   '16_BIT_UNSIGNED', '1000',
                                                   '1', 'LAST', 'LAST')
                print(path + stem + 'c')
            except:
                print(stem + ' failed!')
                pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET',
                monthRange=None, yearRange=None):
    """Divide mosaicked MODIS rasters by a scaling factor.

    :param path: directory to unconverted modis tiles
    :param out_path: directory to put output in
    :param scaleby: scaling factor for MODIS data; default converts to meters/month
    :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
    :param monthRange: range of months to process data; defaults to [1, 12]
    :param yearRange: range of years to process data; defaults to [2000, 2014]
    :return: scaled rasters saved under out_path
    """
    # None sentinels replace the old mutable list defaults; effective values
    # ([1, 12] and [2000, 2014]) are unchanged for default callers.
    if monthRange is None:
        monthRange = [1, 12]
    if yearRange is None:
        yearRange = [2000, 2014]
    arcpy.CheckOutExtension('spatial')
    # NOTE(review): `path` is unused; rasters are resolved from the current
    # arcpy workspace -- confirm the caller sets arcpy.env.workspace first.
    for y in range(yearRange[0], yearRange[-1] + 1):
        for m in range(monthRange[0], monthRange[-1] + 1):
            nm = data_type + str(y) + str(m).zfill(2)
            # the 'c' suffix marks the mosaicked raster from merge_rasts()
            calc = Divide(nm + 'c', scaleby)
            calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
    """
    Given an input tar archive filepath, extracts the files.
    Required: filepath -- the path to the tar archive
    Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
    compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
    deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
    """
    import tarfile
    with tarfile.open(filepath, compression) as tfile:
        filelist = tfile.getnames()
        # NOTE(review): extractall trusts member paths inside the archive;
        # only use on archives from a trusted source.
        tfile.extractall(path=outfoldername)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError:
            # narrowed from a bare except so unrelated errors (for example
            # KeyboardInterrupt) are no longer swallowed and re-labelled
            raise Exception('Could not delete tar archive {0}.'.format(
                filepath))
    return filelist
def ungz(filepath, compression='rb', deletesource=False):
    """
    Given an input gz archive filepath, extracts the file alongside it.
    Required: filepath -- the path to the gz archive
    Optional: compression -- mode used to open the archive; DEFAULT is 'rb'
    deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: the path of the extracted file (input path minus its '.gz' suffix)
    """
    import gzip
    import shutil
    outname = filepath[:-3]
    # stream the decompressed bytes instead of reading the whole file into
    # memory; both handles are closed even if decompression fails (the old
    # code left the output handle open on error and re-closed `f` inside
    # its own `with` block)
    with gzip.open(filepath, compression) as fin, open(outname, 'wb') as fout:
        shutil.copyfileobj(fin, fout)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError:
            # narrowed from a bare except so unrelated errors are not hidden
            raise Exception('Could not delete gz archive {0}.'.format(filepath)
                )
    return outname
def replace_hdr_file(hdrfile):
    """Write the correct .hdr metadata for a SNODAS .bil raster.

    Required: hdrfile -- filepath for .hdr file to replace/create
    Output: None
    """
    # fixed national-grid header expected by Arc for the masked SNODAS .bil
    header_text = """byteorder M
layout bil
nbands 1
nbits 16
ncols 6935
nrows 3351
ulxmap -124.729583333331703
ulymap 52.871249516804028
xdim 0.00833333333
ydim 0.00833333333
"""
    with open(hdrfile, 'w') as hdr_out:
        hdr_out.write(header_text)
def get_snodas(out_dir, months='', years=''):
    """Download daily SNODAS archives from the NSIDC ftp server. This is slow.

    :param out_dir: directory to store downloaded SNODAS zip files
    :param months: [first, last] month desired; defaults to [1, 12]
    :param years: [first, last] year desired; defaults to [2000, 2015]
    :return: saved zip files in out_dir

    .. note:
        Use polaris: http://nsidc.org/data/polaris/
    """
    import ftplib
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
                   'Sep', 'Oct', 'Nov', 'Dec']
    # server folders are named like '01_Jan', '02_Feb', ...
    month_dirs = ['{:02d}_{}'.format(i, month_names[i - 1])
                  for i in range(months[0], months[1] + 1)]
    year_dirs = [str(i) for i in range(years[0], years[1] + 1)]
    for yr in year_dirs:
        for mon in month_dirs:
            # a fresh connection per month, as in the original implementation
            ftp = ftplib.FTP('sidads.colorado.edu')
            ftp.login()
            ftp.cwd('pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + mon + '/')
            for fname in ftp.nlst():
                if len(fname) > 4:
                    with open(out_dir + '/' + fname, 'wb') as save_file:
                        ftp.retrbinary('RETR ' + fname, save_file.write)
                    print(fname)
            ftp.close()
def rename_polaris_snodas(path):
    """Rename raw SNODAS files in ``path`` to <code><yyyy><mm><dd><ext>.

    The 4-letter product code is looked up from the first 17 characters of
    the raw filename; the date is parsed from the text following 'TNATS'.
    Files that do not start with 'us_ssmv' are left untouched.
    """
    prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',
                'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',
                'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
                'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
    for filename in os.listdir(path):
        if filename.startswith('us_ssmv'):
            code = prodcode[filename[0:17]]
            yrsrt = filename.find('TNATS') + 5  # date begins after 'TNATS'
            yr = filename[yrsrt:yrsrt + 4]
            mo = filename[yrsrt + 4:yrsrt + 6]
            dy = filename[yrsrt + 6:yrsrt + 8]
            try:
                os.rename(os.path.join(path, filename), os.path.join(path,
                    code + yr + mo + dy + filename[-4:]))
            except OSError:
                # narrowed from a bare except: only skip filesystem failures
                # (e.g. target already exists on Windows), not real bugs
                pass
def snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',
    monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=
    'H:/GIS/SNODAS.gdb/', area=''):
    """
    summarizes daily SNODAS data to monthly values
    INPUT
    -----
    code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
    scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
    statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
    RANGE, STD, SUM, or VARIETY
    monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
    path = directory where raw geoTiffs are located
    outpath = directory where final data will be stored
    area = polygon used to mask the output; defaults to Utah's watersheds
    OUTPUT
    ------
    projected and scaled monthly rasters
    """
    # BUG FIX: the defaults were previously assigned to unused locals
    # (`months`/`years`), leaving monthRange/yearRange as '' and crashing
    # in the range() calls below when called with default arguments.
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    g = {}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    # map CellStatistics keywords to the 3-letter suffix used in output names
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
                 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    for y in range(yearRange[0], yearRange[1] + 1):
        for m in range(monthRange[0], monthRange[1] + 1):
            nm = code + str(y) + str(m).zfill(2)
            # gather the daily tiffs (named <code><yyyy><mm><dd>) for y/m
            g[nm] = []
            for name in sorted(glob.glob(path + code + '*.tif')):
                rast = os.path.basename(name)
                if (rast[0:4] == code and int(rast[4:8]) == y and
                        int(rast[8:10]) == m):
                    g[nm].append(rast)
            if len(g[nm]) > 0:
                cellstats = CellStatistics(g[nm], statistics_type=statistics,
                                           ignore_nodata='DATA')
                div = Divide(cellstats, scalingFactor)  # to physical units
                calc = Con(div < 0.0, 0.0, div)  # clamp negative artifacts
                ifnull = Con(IsNull(calc), 0, calc)  # fill NoData with zero
                outCS = arcpy.SpatialReference(102039)
                # was `rast[0:4]`, the leftover loop variable; every matched
                # raster starts with `code`, so this is the same name
                outnm = (outpath + code + str(y).zfill(2) + str(m).zfill(2) +
                         statstype[statistics])
                memoryFeature = 'in_memory/myMemoryFeature'
                arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,
                                               'BILINEAR', outcellsize,
                                               'WGS_1984_(ITRF00)_To_NAD_1983',
                                               '#', '#')
                extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
                extrc.save(outnm)
                print(outnm)
                arcpy.Delete_management('in_memory')
def totalavg(code, statistics='MEAN', monthRange=None, yearRange=None,
             path='H:/GIS/SNODAS/SNODASproj.gdb/',
             outpath='H:/GIS/SNODAS/SNODASproj.gdb/'):
    """Summarizes daily raster data into long-term monthly rasters.

    INPUT
    -----
    code = string with four letters representing data type to summarize (example 'BSSB')
    statistics = how data will be summarized; defaults to monthly averages; options are
    ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
    Most common are 'MEAN','MEDIAN', and 'SUM'
    These are inputs that will be used in the ArcPy CellStatistics function.
    See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
    monthRange = beginning and end months of summary statistics; defaults to [1, 12]
    yearRange = beginning and end years of summary statistics; defaults to [2003, 2016]
    path = location of geodatabase of data to summarize
    outpath = location of geodatabase where output data should be stored

    OUTPUT
    ------
    summary raster(s) stored in outpath
    """
    # None sentinels replace the old mutable list defaults ([1, 12] and
    # [2003, 2016]); effective values are unchanged for default callers.
    if monthRange is None:
        monthRange = [1, 12]
    if yearRange is None:
        yearRange = [2003, 2016]
    # map CellStatistics keywords to the 3-letter suffix used in output names
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
                 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    # hoisted out of the raster loop; the original rebuilt this per raster
    yrrng = range(yearRange[0], yearRange[1] + 1)
    for m in range(monthRange[0], monthRange[1] + 1):
        nm = code + '0000' + str(m).zfill(2)
        # rasters are named <code><yyyy><mm><dd>; keep those matching month m
        rasts = [rast for rast in arcpy.ListRasters()
                 if rast[0:4] == code and int(rast[4:8]) in yrrng
                 and int(rast[8:10]) == m]
        if rasts:
            calc = CellStatistics(rasts, statistics_type=statistics,
                                  ignore_nodata='DATA')
            # NOTE(review): output is saved relative to the workspace (`path`);
            # the `outpath` parameter is never used here -- confirm intent.
            calc.save(nm + statstype[statistics])
            print(nm + statstype[statistics])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_modis(tiles, save_path, months='', years=''):
    """Automatically retrieve monthly MODIS16 hdf files from the ntsg website.

    :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    :param save_path: folder in which to save the downloaded hdf files
    :param months: months of interest; defaults to [1,12]
    :param years: years of interest; defaults to [2000,2015]
    :return: saves files in save_path
    """
    from bs4 import BeautifulSoup
    # BUG FIX: the module-level py2/py3 import shim leaves `urllib2` undefined
    # on Python 3, and `urllib.urlretrieve` does not exist there either;
    # resolve the right callables locally so both versions work.
    try:
        from urllib.request import urlopen, urlretrieve  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2
        from urllib import urlretrieve
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for tile in tiles:
        for yr in yrs:
            for m in mons:
                base_url = (
                    'http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/'
                    )
                url = base_url + 'Y{:}/M{:}/'.format(yr, m)
                # scrape the directory listing for this tile's hdf link
                soup = BeautifulSoup(urlopen(url), 'lxml')
                hdf_name = soup.find_all('', {'href': re.compile(
                    'MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.
                    IGNORECASE)})
                urlretrieve(url + hdf_name[0].text,
                    save_path + hdf_name[0].text)
                print(save_path + hdf_name[0].text)
                time.sleep(0.5)  # be polite to the server
def get_file_list(save_path, wld='*.105*.hdf'):
    """Collect the raw MODIS files sitting in a folder.

    Args:
        save_path: path to folder where raw MODIS files are
        wld: common wildcard in all of the raw MODIS files

    Returns:
        list of files to analyze in the raw folder
    """
    pattern = os.path.join(save_path, wld)
    return glob.glob(pattern)
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
    """Reproject raw MODIS16 hdf tiles from sinusoidal to a target projection.

    Args:
        files: list of file paths of MODIS hdf files, e.g. from get_file_list()
        save_path: folder to store the reprojected files
        data_type: MODIS16 variable to extract; one of 'ET','PET','LE','PLE'
        eight_day: True for 8-day input rasters, False for monthly
        proj: EPSG/ESRI code of the output projection; default 102003 (Albers)

    Returns:
        Reprojected MODIS files on disk under save_path/<data_type>/

    ..notes:
        The EPSG code for NAD83 Zone 12 is 26912.
        http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
        https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    import pymodis
    # output subdirectory and hdf subset selector for each MOD16 variable
    subdirs = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
    layers = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],
              'PLE': [0, 0, 0, 1]}
    dest = save_path + subdirs[data_type]
    if not os.path.exists(dest):
        os.makedirs(dest)
        print('created {:}'.format(dest))
    for hdf in files:
        # assumes Windows-style paths like <dir>\MOD16A2.Ayyyyddd.hXXvYY.105....hdf
        pieces = hdf.split('\\')[1].split('.')
        year = pieces[1][1:5]
        v = pieces[2][-2:]
        h = pieces[2][1:3]
        if eight_day:
            fname = 'A' + year + 'D' + pieces[1][-3:] + 'h' + h + 'v' + v
        else:
            fname = 'A' + year + 'M' + pieces[1][-2:] + 'h' + h + 'v' + v
        pref = os.path.join(dest + fname)
        converter = pymodis.convertmodis_gdal.convertModisGDAL(
            hdfname=hdf, prefix=pref, subset=layers[data_type], res=1000,
            epsg=proj)
        try:
            converter.run()
        except:
            print(fname + ' failed!')
            pass
def clip_and_fix(path, outpath, data_type, area=''):
    """Clip MODIS rasters to a watershed polygon and null out exception values.

    Args:
        path: folder of the reprojected MODIS files
        outpath: ESRI gdb to store the clipped files
        data_type: type of MODIS16 data being clipped; options are 'ET','PET','LE', and 'PLE'
        area: path to polygon used to clip tiles; defaults to Utah's watersheds
    """
    arcpy.CheckOutExtension('Spatial')
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    arcpy.env.mask = area
    arcpy.CheckOutExtension('spatial')
    for rast in arcpy.ListRasters():
        source = arcpy.Raster(rast)
        # raster values above 32700 are exception codes; set them to NoData
        cleaned = SetNull(source > 32700, source)
        outname = (outpath + data_type + rast[1:5] + rast[6:8] + 'h' +
                   rast[10:11] + 'v' + rast[13:14])
        cleaned.save(outname)
        print(outname)
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
    """Mosaic (merge) the separate MODIS cell rasters into one monthly layer.

    Args:
        path: workspace holding the per-cell rasters
        data_type: MODIS16 variable prefix used in raster names
        monthRange: [first, last] month to mosaic; defaults to [1, 12]
        yearRange: [first, last] year to mosaic; defaults to [2000, 2015]
        outpath: output workspace; defaults to `path`
    """
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    if outpath == '':
        outpath = path
    arcpy.env.workspace = path
    outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    for year in range(yearRange[0], yearRange[-1] + 1):
        for month in range(monthRange[0], monthRange[-1] + 1):
            stem = data_type + str(year) + str(month).zfill(2)
            tiles = [rast for rast in arcpy.ListRasters(stem + '*')]
            try:
                arcpy.MosaicToNewRaster_management(tiles, outpath,
                                                   stem + 'c', outCS,
                                                   '16_BIT_UNSIGNED', '1000',
                                                   '1', 'LAST', 'LAST')
                print(path + stem + 'c')
            except:
                print(stem + ' failed!')
                pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET',
                monthRange=None, yearRange=None):
    """Divide mosaicked MODIS rasters by a scaling factor.

    :param path: directory to unconverted modis tiles
    :param out_path: directory to put output in
    :param scaleby: scaling factor for MODIS data; default converts to meters/month
    :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
    :param monthRange: range of months to process data; defaults to [1, 12]
    :param yearRange: range of years to process data; defaults to [2000, 2014]
    :return: scaled rasters saved under out_path
    """
    # None sentinels replace the old mutable list defaults; effective values
    # ([1, 12] and [2000, 2014]) are unchanged for default callers.
    if monthRange is None:
        monthRange = [1, 12]
    if yearRange is None:
        yearRange = [2000, 2014]
    arcpy.CheckOutExtension('spatial')
    # NOTE(review): `path` is unused; rasters are resolved from the current
    # arcpy workspace -- confirm the caller sets arcpy.env.workspace first.
    for y in range(yearRange[0], yearRange[-1] + 1):
        for m in range(monthRange[0], monthRange[-1] + 1):
            nm = data_type + str(y) + str(m).zfill(2)
            # the 'c' suffix marks the mosaicked raster from merge_rasts()
            calc = Divide(nm + 'c', scaleby)
            calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
    """
    Given an input tar archive filepath, extracts the files.
    Required: filepath -- the path to the tar archive
    Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
    compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
    deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
    """
    import tarfile
    with tarfile.open(filepath, compression) as tfile:
        filelist = tfile.getnames()
        # NOTE(review): extractall trusts member paths inside the archive;
        # only use on archives from a trusted source.
        tfile.extractall(path=outfoldername)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError:
            # narrowed from a bare except so unrelated errors (for example
            # KeyboardInterrupt) are no longer swallowed and re-labelled
            raise Exception('Could not delete tar archive {0}.'.format(
                filepath))
    return filelist
def ungz(filepath, compression='rb', deletesource=False):
    """
    Given an input gz archive filepath, extracts the file alongside it.
    Required: filepath -- the path to the gz archive
    Optional: compression -- mode used to open the archive; DEFAULT is 'rb'
    deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: the path of the extracted file (input path minus its '.gz' suffix)
    """
    import gzip
    import shutil
    outname = filepath[:-3]
    # stream the decompressed bytes instead of reading the whole file into
    # memory; both handles are closed even if decompression fails (the old
    # code left the output handle open on error and re-closed `f` inside
    # its own `with` block)
    with gzip.open(filepath, compression) as fin, open(outname, 'wb') as fout:
        shutil.copyfileobj(fin, fout)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError:
            # narrowed from a bare except so unrelated errors are not hidden
            raise Exception('Could not delete gz archive {0}.'.format(filepath)
                )
    return outname
def replace_hdr_file(hdrfile):
    """Write the correct .hdr metadata for a SNODAS .bil raster.

    Required: hdrfile -- filepath for .hdr file to replace/create
    Output: None
    """
    # fixed national-grid header expected by Arc for the masked SNODAS .bil
    header_text = """byteorder M
layout bil
nbands 1
nbits 16
ncols 6935
nrows 3351
ulxmap -124.729583333331703
ulymap 52.871249516804028
xdim 0.00833333333
ydim 0.00833333333
"""
    with open(hdrfile, 'w') as hdr_out:
        hdr_out.write(header_text)
def get_snodas(out_dir, months='', years=''):
    """Download daily SNODAS archives from the NSIDC ftp server. This is slow.

    :param out_dir: directory to store downloaded SNODAS zip files
    :param months: [first, last] month desired; defaults to [1, 12]
    :param years: [first, last] year desired; defaults to [2000, 2015]
    :return: saved zip files in out_dir

    .. note:
        Use polaris: http://nsidc.org/data/polaris/
    """
    import ftplib
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
                   'Sep', 'Oct', 'Nov', 'Dec']
    # server folders are named like '01_Jan', '02_Feb', ...
    month_dirs = ['{:02d}_{}'.format(i, month_names[i - 1])
                  for i in range(months[0], months[1] + 1)]
    year_dirs = [str(i) for i in range(years[0], years[1] + 1)]
    for yr in year_dirs:
        for mon in month_dirs:
            # a fresh connection per month, as in the original implementation
            ftp = ftplib.FTP('sidads.colorado.edu')
            ftp.login()
            ftp.cwd('pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + mon + '/')
            for fname in ftp.nlst():
                if len(fname) > 4:
                    with open(out_dir + '/' + fname, 'wb') as save_file:
                        ftp.retrbinary('RETR ' + fname, save_file.write)
                    print(fname)
            ftp.close()
def rename_polaris_snodas(path):
    """Rename raw SNODAS files in ``path`` to <code><yyyy><mm><dd><ext>.

    The 4-letter product code is looked up from the first 17 characters of
    the raw filename; the date is parsed from the text following 'TNATS'.
    Files that do not start with 'us_ssmv' are left untouched.
    """
    prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',
                'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',
                'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
                'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
    for filename in os.listdir(path):
        if filename.startswith('us_ssmv'):
            code = prodcode[filename[0:17]]
            yrsrt = filename.find('TNATS') + 5  # date begins after 'TNATS'
            yr = filename[yrsrt:yrsrt + 4]
            mo = filename[yrsrt + 4:yrsrt + 6]
            dy = filename[yrsrt + 6:yrsrt + 8]
            try:
                os.rename(os.path.join(path, filename), os.path.join(path,
                    code + yr + mo + dy + filename[-4:]))
            except OSError:
                # narrowed from a bare except: only skip filesystem failures
                # (e.g. target already exists on Windows), not real bugs
                pass
def snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',
    monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=
    'H:/GIS/SNODAS.gdb/', area=''):
    """
    summarizes daily SNODAS data to monthly values
    INPUT
    -----
    code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
    scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
    statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
    RANGE, STD, SUM, or VARIETY
    monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
    path = directory where raw geoTiffs are located
    outpath = directory where final data will be stored
    area = polygon used to mask the output; defaults to Utah's watersheds
    OUTPUT
    ------
    projected and scaled monthly rasters
    """
    # BUG FIX: the defaults were previously assigned to unused locals
    # (`months`/`years`), leaving monthRange/yearRange as '' and crashing
    # in the range() calls below when called with default arguments.
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    g = {}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    # map CellStatistics keywords to the 3-letter suffix used in output names
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
                 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    for y in range(yearRange[0], yearRange[1] + 1):
        for m in range(monthRange[0], monthRange[1] + 1):
            nm = code + str(y) + str(m).zfill(2)
            # gather the daily tiffs (named <code><yyyy><mm><dd>) for y/m
            g[nm] = []
            for name in sorted(glob.glob(path + code + '*.tif')):
                rast = os.path.basename(name)
                if (rast[0:4] == code and int(rast[4:8]) == y and
                        int(rast[8:10]) == m):
                    g[nm].append(rast)
            if len(g[nm]) > 0:
                cellstats = CellStatistics(g[nm], statistics_type=statistics,
                                           ignore_nodata='DATA')
                div = Divide(cellstats, scalingFactor)  # to physical units
                calc = Con(div < 0.0, 0.0, div)  # clamp negative artifacts
                ifnull = Con(IsNull(calc), 0, calc)  # fill NoData with zero
                outCS = arcpy.SpatialReference(102039)
                # was `rast[0:4]`, the leftover loop variable; every matched
                # raster starts with `code`, so this is the same name
                outnm = (outpath + code + str(y).zfill(2) + str(m).zfill(2) +
                         statstype[statistics])
                memoryFeature = 'in_memory/myMemoryFeature'
                arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,
                                               'BILINEAR', outcellsize,
                                               'WGS_1984_(ITRF00)_To_NAD_1983',
                                               '#', '#')
                extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
                extrc.save(outnm)
                print(outnm)
                arcpy.Delete_management('in_memory')
def totalavg(code, statistics='MEAN', monthRange=None, yearRange=None,
             path='H:/GIS/SNODAS/SNODASproj.gdb/',
             outpath='H:/GIS/SNODAS/SNODASproj.gdb/'):
    """Summarizes daily raster data into long-term monthly rasters.

    INPUT
    -----
    code = string with four letters representing data type to summarize (example 'BSSB')
    statistics = how data will be summarized; defaults to monthly averages; options are
    ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
    Most common are 'MEAN','MEDIAN', and 'SUM'
    These are inputs that will be used in the ArcPy CellStatistics function.
    See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
    monthRange = beginning and end months of summary statistics; defaults to [1, 12]
    yearRange = beginning and end years of summary statistics; defaults to [2003, 2016]
    path = location of geodatabase of data to summarize
    outpath = location of geodatabase where output data should be stored

    OUTPUT
    ------
    summary raster(s) stored in outpath
    """
    # None sentinels replace the old mutable list defaults ([1, 12] and
    # [2003, 2016]); effective values are unchanged for default callers.
    if monthRange is None:
        monthRange = [1, 12]
    if yearRange is None:
        yearRange = [2003, 2016]
    # map CellStatistics keywords to the 3-letter suffix used in output names
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',
                 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    # hoisted out of the raster loop; the original rebuilt this per raster
    yrrng = range(yearRange[0], yearRange[1] + 1)
    for m in range(monthRange[0], monthRange[1] + 1):
        nm = code + '0000' + str(m).zfill(2)
        # rasters are named <code><yyyy><mm><dd>; keep those matching month m
        rasts = [rast for rast in arcpy.ListRasters()
                 if rast[0:4] == code and int(rast[4:8]) in yrrng
                 and int(rast[8:10]) == m]
        if rasts:
            calc = CellStatistics(rasts, statistics_type=statistics,
                                  ignore_nodata='DATA')
            # NOTE(review): output is saved relative to the workspace (`path`);
            # the `outpath` parameter is never used here -- confirm intent.
            calc.save(nm + statstype[statistics])
            print(nm + statstype[statistics])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
    """Automatically retrieve monthly MODIS16 hdf files from the ntsg website.

    :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    :param save_path: folder in which to save the downloaded hdf files
    :param months: months of interest; defaults to [1,12]
    :param years: years of interest; defaults to [2000,2015]
    :return: saves files in save_path
    """
    from bs4 import BeautifulSoup
    # BUG FIX: the module-level py2/py3 import shim leaves `urllib2` undefined
    # on Python 3, and `urllib.urlretrieve` does not exist there either;
    # resolve the right callables locally so both versions work.
    try:
        from urllib.request import urlopen, urlretrieve  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2
        from urllib import urlretrieve
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for tile in tiles:
        for yr in yrs:
            for m in mons:
                base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
                url = base_url + "Y{:}/M{:}/".format(yr, m)
                # scrape the directory listing for this tile's hdf link
                soup = BeautifulSoup(urlopen(url), "lxml")
                hdf_name = soup.find_all('', {
                    'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
                urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
                print(save_path + hdf_name[0].text)
                time.sleep(0.5)  # be polite to the server
def get_file_list(save_path, wld='*.105*.hdf'):
    """Collect the raw MODIS files sitting in a folder.

    Args:
        save_path: path to folder where raw MODIS files are
        wld: common wildcard in all of the raw MODIS files

    Returns:
        list of files to analyze in the raw folder
    """
    pattern = os.path.join(save_path, wld)
    return glob.glob(pattern)
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
    """Reproject raw MODIS16 hdf tiles from sinusoidal to a target projection.

    Args:
        files: list of file paths of MODIS hdf files, e.g. from get_file_list()
        save_path: folder to store the reprojected files
        data_type: MODIS16 variable to extract; one of 'ET','PET','LE','PLE'
        eight_day: True for 8-day input rasters, False for monthly
        proj: EPSG/ESRI code of the output projection; default 102003 (Albers)

    Returns:
        Reprojected MODIS files on disk under save_path/<data_type>/

    ..notes:
        The EPSG code for NAD83 Zone 12 is 26912.
        http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
        https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    """
    import pymodis
    # output subdirectory and hdf subset selector for each MOD16 variable
    subdirs = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
    layers = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],
              'PLE': [0, 0, 0, 1]}
    dest = save_path + subdirs[data_type]
    if not os.path.exists(dest):
        os.makedirs(dest)
        print('created {:}'.format(dest))
    for hdf in files:
        # assumes Windows-style paths like <dir>\MOD16A2.Ayyyyddd.hXXvYY.105....hdf
        pieces = hdf.split('\\')[1].split('.')
        year = pieces[1][1:5]
        v = pieces[2][-2:]
        h = pieces[2][1:3]
        if eight_day:
            fname = 'A' + year + 'D' + pieces[1][-3:] + 'h' + h + 'v' + v
        else:
            fname = 'A' + year + 'M' + pieces[1][-2:] + 'h' + h + 'v' + v
        pref = os.path.join(dest + fname)
        converter = pymodis.convertmodis_gdal.convertModisGDAL(
            hdfname=hdf, prefix=pref, subset=layers[data_type], res=1000,
            epsg=proj)
        try:
            converter.run()
        except:
            print(fname + ' failed!')
            pass
def clip_and_fix(path, outpath, data_type, area=''):
    """Clip MODIS rasters to a watershed polygon and null out exception values.

    Args:
        path: folder of the reprojected MODIS files
        outpath: ESRI gdb to store the clipped files
        data_type: type of MODIS16 data being clipped; options are 'ET','PET','LE', and 'PLE'
        area: path to polygon used to clip tiles; defaults to Utah's watersheds
    """
    arcpy.CheckOutExtension("Spatial")
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    arcpy.env.mask = area
    arcpy.CheckOutExtension("spatial")
    for rast in arcpy.ListRasters():
        source = arcpy.Raster(rast)
        # raster values above 32700 are exception codes; set them to NoData
        cleaned = SetNull(source > 32700, source)
        outname = (outpath + data_type + rast[1:5] + rast[6:8] + 'h' +
                   rast[10:11] + 'v' + rast[13:14])
        cleaned.save(outname)
        print(outname)
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
    """Mosaics (merges) different MODIS cells into one layer per month.

    Args:
        path: workspace folder containing the clipped MODIS rasters
        data_type: MODIS16 product prefix; options are 'ET','PET','LE', and 'PLE'
        monthRange: [first, last] month to process; defaults to [1, 12]
        yearRange: [first, last] year to process; defaults to [2000, 2015]
        outpath: folder where mosaics are saved; defaults to `path`
    """
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    if outpath == '':
        outpath = path
    arcpy.env.workspace = path
    outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    for y in range(yearRange[0], yearRange[-1] + 1):  # set years converted here
        for m in range(monthRange[0], monthRange[-1] + 1):  # set months converted here
            nm = data_type + str(y) + str(m).zfill(2)
            # gather every tile raster for this year/month
            rlist = [rast for rast in arcpy.ListRasters(nm + '*')]
            try:
                arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS,
                                                   "16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
                print(path + nm + 'c')
            # narrowed from a bare except so Ctrl-C / SystemExit still propagate
            except Exception:
                print(nm + ' failed!')
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=(1, 12), yearRange=(2000, 2014)):
    """Scales mosaicked MODIS rasters by a constant factor.

    :param path: directory to unconverted modis tiles
    :param out_path: directory to put output in
    :param scaleby: scaling factor for MODIS data; default converts to meters/month
    :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'
    :param monthRange: (first, last) months to process; defaults were mutable lists, now tuples
    :param yearRange: (first, last) years to process
    :return: None; scaled rasters are saved to `out_path`

    .. note::
        Relies on the arcpy workspace already pointing at the mosaicked
        rasters (the 'c'-suffixed names) -- `path` itself is not applied
        to ``arcpy.env.workspace`` here; confirm callers set it first.
    """
    arcpy.CheckOutExtension('spatial')
    for y in range(yearRange[0], yearRange[-1] + 1):  # set years converted here
        for m in range(monthRange[0], monthRange[-1] + 1):  # set months converted here
            nm = data_type + str(y) + str(m).zfill(2)
            # divide the mosaicked ('c' suffix) raster by the scale factor
            calc = Divide(nm + 'c', scaleby)
            calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
    """
    Given an input tar archive filepath, extracts the files.

    Required: filepath -- the path to the tar archive
    Optional: outfoldername -- the output directory for the files; DEFAULT is the current directory
              compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extracted files
    """
    import tarfile
    with tarfile.open(filepath, compression) as tfile:
        filelist = tfile.getnames()
        tfile.extractall(path=outfoldername)
    if deletesource:
        try:
            os.remove(filepath)
        # narrowed from a bare except; chain the original error so the
        # real cause of the failed deletion is not hidden
        except OSError as err:
            raise Exception("Could not delete tar archive {0}.".format(filepath)) from err
    return filelist
def ungz(filepath, compression='rb', deletesource=False):
    """
    Given an input gz archive filepath, extracts the file alongside it.

    Required: filepath -- the path to the gz archive (must end in '.gz')
    Optional: compression -- mode passed to gzip.open; DEFAULT is 'rb'
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: path of the extracted file (the input path minus '.gz')
    """
    import gzip
    import shutil
    outpath = filepath[:-3]  # strip the '.gz' extension
    # context managers guarantee both handles close even if an error occurs
    # (the original leaked the output handle on exception); copyfileobj
    # streams in chunks instead of loading the whole file into memory
    with gzip.open(filepath, compression) as f, open(outpath, 'wb') as outF:
        shutil.copyfileobj(f, outF)
    if deletesource:
        try:
            os.remove(filepath)
        # narrowed from a bare except; chain the original error
        except OSError as err:
            raise Exception("Could not delete gz archive {0}.".format(filepath)) from err
    return outpath
def replace_hdr_file(hdrfile):
    """
    Replace the .hdr file for a .bil raster with the correct data for Arc processing.

    The header values (grid size, upper-left map coordinates, cell
    dimensions) are hard-coded -- presumably for the masked SNODAS
    CONUS grid; confirm before reusing for other rasters.

    Required: hdrfile -- filepath for .hdr file to replace/create
    Output: None
    """
    # hdr file replacement string; the trailing backslash is a line
    # continuation inside the literal, so no newline is inserted between
    # 'nrows 3351\n' and 'ulxmap ...'
    HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
    with open(hdrfile, 'w') as o:
        o.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
    """Downloads daily SNODAS data from ftp. This is slow.

    :param out_dir: directory to store downloaded SNODAS zip files
    :param months: [first, last] months desired for download; defaults to [1, 12]
    :param years: [first, last] years desired for download; defaults to [2000, 2015]
    :return: saved zip files in out_dir

    .. note:
        Use polaris: http://nsidc.org/data/polaris/
    """
    import ftplib
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # ftp folders are named like '01_Jan'
    mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for yr in yrs:
        for m in mons:
            ftp_addr = "sidads.colorado.edu"
            ftp = ftplib.FTP(ftp_addr)
            ftp.login()
            dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
            ftp.cwd(dir_path)
            files = ftp.nlst()
            for f in files:
                # skip short directory-style entries; real files have longer names
                if len(f) > 4:
                    # context manager closes the handle even if the
                    # transfer raises (the original leaked it on error)
                    with open(out_dir + "/" + f, 'wb') as save_file:
                        ftp.retrbinary("RETR " + f, save_file.write)
                    print(f)
            ftp.close()
def rename_polaris_snodas(path):
    """Renames raw SNODAS files in `path` to '<CODE><yyyy><mm><dd><ext>'.

    The four-letter product code is looked up from the first 17 characters
    of the raw filename; the date is parsed from the characters after the
    'TNATS' marker. Files whose rename fails on the filesystem (e.g. the
    target already exists on Windows) are skipped.
    """
    # maps the raw SNODAS product/version prefix to a short product code
    prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
                'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
                'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
    for filename in os.listdir(path):
        if filename.startswith("us_ssmv"):
            code = prodcode[filename[0:17]]
            yrsrt = filename.find('TNATS') + 5  # date begins right after the marker
            yr = filename[yrsrt:yrsrt + 4]
            mo = filename[yrsrt + 4:yrsrt + 6]
            dy = filename[yrsrt + 6:yrsrt + 8]
            try:
                os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
            # narrowed from a bare except: only swallow filesystem errors,
            # so unknown product codes (KeyError) surface as real bugs
            except OSError:
                pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
                 path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
    """
    summarizes daily SNODAS data to monthly values

    INPUT
    -----
    code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
    scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
    statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
        RANGE, STD, SUM, or VARIETY
    monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
    path = directory where raw geoTiffs are located
    outpath = directory where final data will be stored
    area = path to polygon used to clip the output; defaults to Utah watersheds
    OUTPUT
    ------
    projected and scaled monthly rasters
    """
    # BUG FIX: the defaults were previously assigned to unused names
    # ('months'/'years'), so calling with the default '' crashed on ''[0]
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    g = {}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    # arcpy.env.mask = area
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
                 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    for y in range(yearRange[0], yearRange[1] + 1):  # set years converted here
        for m in range(monthRange[0], monthRange[1] + 1):  # set months converted here
            # dictionary key based on data type, year, and month
            key = code + str(y) + str(m).zfill(2)
            g[key] = []
            # pick all tiff files from raw data folder of a data type
            for name in sorted(glob.glob(path + code + '*.tif')):
                rast = os.path.basename(name)
                if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
                    g[key].append(rast)  # list of daily rasters for this month
            if len(g[key]) > 0:
                # arcpy sa functions that summarize the daily data to monthly data
                cellstats = CellStatistics(g[key], statistics_type=statistics,
                                           ignore_nodata="DATA")
                div = Divide(cellstats, scalingFactor)  # scale factor, converts to kg/m2 10 then to m 0.001
                calc = Con(div < 0.0, 0.0, div)  # remove negative and null values
                ifnull = Con(IsNull(calc), 0, calc)  # remove null
                # WKID 102039
                outCS = arcpy.SpatialReference(102039)  # change coordinate units to m for spatial analysis
                # BUG FIX: build the name from `code` instead of the loop-carried
                # `rast`, which referred to whatever file the glob visited last
                outnm = outpath + code + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
                memoryFeature = "in_memory/myMemoryFeature"
                arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
                                               'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
                # Execute ExtractByMask to clip snodas data to Utah watersheds
                extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
                extrc.save(outnm)
                print(outnm)
                arcpy.Delete_management("in_memory")
def totalavg(code, statistics='MEAN', monthRange=(1, 12), yearRange=(2003, 2016),
             path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath='H:/GIS/SNODAS/SNODASproj.gdb/'):
    """Summarizes daily raster data into monthly data.

    INPUT
    -----
    code = string with four letters represting data type to summarize (example 'BSSB')
    statistics = how data will be summarized; defaults to monthly averages; options are
        ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
        Most common are 'MEAN','MEDIAN', and 'SUM'
        These are inputs that will be used in the ArcPy CellStatistics function.
        See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
    monthRange = beginning and end months of summary statistics
    yearRange = beginning and end years of summary statistics
    path = location of geodatabase of data to summarize
    outpath = location of geodatabase where output data should be stored
        NOTE(review): `outpath` is accepted but never used; outputs are
        saved into the workspace set from `path` -- confirm intended.
    OUTPUT
    ------
    summary raster(s) stored in outpath
    """
    g = {}
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
                 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    # hoisted out of the raster loop: the year range never changes per raster
    yrrng = range(yearRange[0], yearRange[1] + 1)
    # iterate over month range set here; default is 1 to 12 (Jan to Dec)
    for m in range(monthRange[0], monthRange[1] + 1):
        # dictionary key based on data type and month ('0000' marks all-years)
        key = code + '0000' + str(m).zfill(2)
        g[key] = []
        for rast in arcpy.ListRasters():
            # collect rasters matching the code, any year in range, and this month
            if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
                g[key].append(rast)
        if len(g[key]) > 0:
            # arcpy sa function that summarizes the daily data to monthly data
            calc = CellStatistics(g[key], statistics_type=statistics, ignore_nodata='DATA')
            calc.save(key + statstype[statistics])
            print(key + statstype[statistics])
# Script entry point.
# NOTE(review): main() is not defined anywhere in this chunk -- presumably
# defined earlier in the file; confirm it exists before running as a script.
if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "afb09f9d5860994f38e8553b19e7ebc339cc2df6",
"index": 8785,
"step-1": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\n<mask token>\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n 
raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\n<mask token>\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\n<mask token>\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 
2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = 
f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], 
yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not 
delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. 
note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\n<mask token>\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n 
Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = 
f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], 
yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not 
delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. 
note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',\n monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=\n 'H:/GIS/SNODAS.gdb/', area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are 
MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n for y in range(yearRange[0], yearRange[1] + 1):\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + str(y) + str(m).zfill(2)] = []\n for name in sorted(glob.glob(path + code + '*.tif')):\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]\n ) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2\n )], statistics_type=statistics, ignore_nodata='DATA')\n div = Divide(cellstats, scalingFactor)\n calc = Con(div < 0.0, 0.0, div)\n ifnull = Con(IsNull(calc), 0, calc)\n outCS = arcpy.SpatialReference(102039)\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2\n ) + statstype[statistics]\n memoryFeature = 'in_memory/myMemoryFeature'\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,\n 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management('in_memory')\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], 
path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_modis(tiles, save_path, months='', years=''):\n \"\"\"The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.\n\n :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n :param save_path: name of output file name\n :param months: months of interest; defaults to [1,12]\n :param years: years of interest; defaults to [2000,2015]\n :return: saves files in outpath\n \"\"\"\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = (\n 'http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/'\n )\n dir_path = 'Y{:}/M{:}/'.format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), 'lxml')\n hdf_name = soup.find_all('', {'href': re.compile(\n 'MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.\n IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, \n save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n 
data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0],\n 'PLE': [0, 0, 0, 1]}\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n v = f.split('\\\\')[1].split('.')[2][-2:]\n h = f.split('\\\\')[1].split('.')[2][1:3]\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:]\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:]\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=\n f, prefix=pref, subset=matrdir[data_type], res=1000, epsg=proj)\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to 
Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n arcpy.CheckOutExtension('Spatial')\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n arcpy.env.mask = area\n arcpy.CheckOutExtension('spatial')\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[\n 10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:\n 11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c',\n outCS, '16_BIT_UNSIGNED', '1000', '1', 'LAST', 'LAST')\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange\n =[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options 
are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension('spatial')\n for y in range(yearRange[0], yearRange[-1] + 1):\n for m in range(monthRange[0], monthRange[-1] + 1):\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete tar archive {0}.'.format(\n filepath))\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import gzip\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n 
outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception('Could not delete gz archive {0}.'.format(filepath)\n )\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n HDRFILE_STRING = \"\"\"byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n\"\"\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',\n 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [(str(i).zfill(2) + '_' + monnames[i - 1]) for i in range(months\n [0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n for yr in yrs:\n for m in mons:\n ftp_addr = 'sidads.colorado.edu'\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n dir_path = 'pub/DATASETS/NOAA/G02158/masked/' + yr + '/' + m + '/'\n ftp.cwd(dir_path)\n files = ftp.nlst()\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + '/' + f, 'wb')\n ftp.retrbinary('RETR ' + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML',\n 'us_ssmv11050lL00T': 'SPSB', 'us_ssmv11034tS__T': 'SWEQ',\n 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 
'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n for filename in os.listdir(path):\n if filename.startswith('us_ssmv'):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, \n code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics='SUM', outcellsize='1000',\n monthRange='', yearRange='', path='H:/GIS/SNODAS/SNWDS/', outpath=\n 'H:/GIS/SNODAS.gdb/', area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n for y in range(yearRange[0], yearRange[1] + 1):\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + str(y) + str(m).zfill(2)] = []\n for name in sorted(glob.glob(path + code + '*.tif')):\n rast = 
os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]\n ) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2\n )], statistics_type=statistics, ignore_nodata='DATA')\n div = Divide(cellstats, scalingFactor)\n calc = Con(div < 0.0, 0.0, div)\n ifnull = Con(IsNull(calc), 0, calc)\n outCS = arcpy.SpatialReference(102039)\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2\n ) + statstype[statistics]\n memoryFeature = 'in_memory/myMemoryFeature'\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS,\n 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management('in_memory')\n\n\ndef totalavg(code, statistics='MEAN', monthRange=[1, 12], yearRange=[2003, \n 2016], path='H:/GIS/SNODAS/SNODASproj.gdb/', outpath=\n 'H:/GIS/SNODAS/SNODASproj.gdb/'):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 
'MAXIMUM': 'MAX',\n 'MEDIAN': 'MED', 'MINIMUM': 'MIN', 'MINORITY': 'MNR', 'RANGE':\n 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n for m in range(monthRange[0], monthRange[1] + 1):\n g[code + '0000' + str(m).zfill(2)] = []\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1)\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]\n ) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast)\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)],\n statistics_type=statistics, ignore_nodata='DATA')\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\n\n<mask token>\n",
"step-5": "\"\"\"\nThese are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport time\nimport urllib\ntry:\n # For Python 3.0 and later\n import urllib.request\nexcept ImportError:\n # Fall back to Python 2's urllib2\n import urllib2\n\nimport re\nimport glob\nimport os\nimport arcpy\nfrom arcpy.sa import *\n\n\ndef get_modis(tiles, save_path, months='', years=''):\n \"\"\"The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.\n\n :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n :param save_path: name of output file name\n :param months: months of interest; defaults to [1,12]\n :param years: years of interest; defaults to [2000,2015]\n :return: saves files in outpath\n \"\"\"\n\n\n from bs4 import BeautifulSoup\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for tile in tiles:\n for yr in yrs:\n for m in mons:\n base_url = \"http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/\"\n\n dir_path = \"Y{:}/M{:}/\".format(yr, m)\n url = base_url + dir_path\n soup = BeautifulSoup(urllib2.urlopen(url), \"lxml\")\n hdf_name = soup.find_all('', {\n 'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})\n files = urllib.urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)\n print(save_path + hdf_name[0].text)\n time.sleep(0.5)\n\n\ndef get_file_list(save_path, wld='*.105*.hdf'):\n \"\"\"\n\n Args:\n save_path: path to folder where raw MODIS files are\n wld: common wildcard in all of the raw MODIS files\n\n Returns:\n list of files to analyze in the raw folder\n\n \"\"\"\n return 
glob.glob(os.path.join(save_path, wld))\n\n\ndef reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):\n \"\"\"Iterates through MODIS files in a folder reprojecting them.\n\n Takes the crazy MODIS sinusoidal projection to a user defined projection.\n\n Args:\n files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))\n save_path: folder to store the reprojected files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n eight_day: time span of modis file; Bool where default is true (input 8-day rasters)\n proj: projection of output data by epsg number; default is nad83 zone 12\n Returns:\n Reprojected MODIS files\n\n ..notes:\n The EPSG code for NAD83 Zone 12 is 26912.\n The EPSG code for Albers Equal Area is 102003\n http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf\n https://modis-land.gsfc.nasa.gov/MODLAND_grid.html\n https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<\n https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125\n \"\"\"\n import pymodis\n # dictionary to designate a directory\n datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}\n # dictionary to select layer from hdf file that contains the datatype\n matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}\n\n # check for file folder and make it if it doesn't exist\n if not os.path.exists(save_path + datadir[data_type]):\n os.makedirs(save_path + datadir[data_type])\n print('created {:}'.format(save_path + datadir[data_type]))\n\n for f in files:\n year = f.split('\\\\')[1].split('.')[1][1:5]\n\n v = f.split('\\\\')[1].split('.')[2][-2:] # parse v (cell coordinate) from hdf filename\n h = 
f.split('\\\\')[1].split('.')[2][1:3] # parse h (cell coordinate) from hdf filename\n\n # names file based on time span of input rasters; 8-day by default\n if eight_day:\n doy = f.split('\\\\')[1].split('.')[1][-3:] # parse day of year from hdf filename\n fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n else:\n month = f.split('\\\\')[1].split('.')[1][-2:] # parse month from hdf filename\n fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v\n pref = os.path.join(save_path + datadir[data_type] + fname)\n\n convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,\n subset=matrdir[data_type],\n res=1000, epsg=proj)\n # [ET,LE,PET,PLE]\n try:\n convertsingle.run()\n except:\n print(fname + ' failed!')\n pass\n\n\ndef clip_and_fix(path, outpath, data_type, area=''):\n \"\"\"Clips raster to Utah's Watersheds and makes exception values null.\n\n Args:\n path: folder of the reprojected MODIS files\n outpath: ESRI gdb to store the clipped files\n data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'\n area: path to polygon used to clip tiles\n\n \"\"\"\n # Check out the ArcGIS Spatial Analyst extension license\n arcpy.CheckOutExtension(\"Spatial\")\n\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n\n arcpy.env.mask = area\n arcpy.CheckOutExtension(\"spatial\")\n for rast in arcpy.ListRasters():\n calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))\n calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])\n print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])\n\n\ndef merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):\n \"\"\"Mosaics (merges) different MODIS cells into one layer.\n\n\n \"\"\"\n if monthRange == '':\n monthRange = [1, 12]\n if yearRange == '':\n 
yearRange = [2000, 2015]\n if outpath == '':\n outpath = path\n\n arcpy.env.workspace = path\n outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')\n for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here\n nm = data_type + str(y) + str(m).zfill(2)\n rlist = []\n for rast in arcpy.ListRasters(nm + '*'):\n rlist.append(rast)\n try:\n arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \\\n \"16_BIT_UNSIGNED\", \"1000\", \"1\", \"LAST\", \"LAST\")\n\n print(path + nm + 'c')\n except:\n print(nm + ' failed!')\n pass\n\n\ndef scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=[1, 12], yearRange=[2000, 2014]):\n \"\"\"\n\n :param path: directory to unconverted modis tiles\n :param out_path: directory to put output in\n :param scaleby: scaling factor for MODIS data; default converts to meters/month\n :param data_type: type of MODIS16 data being scaled; used for file name; options are 'ET','PET','LE', and 'PLE'\n :param monthRange: range of months to process data\n :param yearRange: range of years to process data\n :return:\n \"\"\"\n arcpy.CheckOutExtension(\"spatial\")\n\n for y in range(yearRange[0], yearRange[-1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[-1] + 1): # set months converted here\n nm = data_type + str(y) + str(m).zfill(2)\n calc = Divide(nm + 'c', scaleby)\n calc.save(out_path + nm)\n\n\ndef untar(filepath, outfoldername='.', compression='r', deletesource=False):\n \"\"\"\n Given an input tar archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after 
extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n import tarfile\n\n with tarfile.open(filepath, compression) as tfile:\n filelist = tfile.getnames()\n tfile.extractall(path=outfoldername)\n\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete tar archive {0}.\".format(filepath))\n\n return filelist\n\n\ndef ungz(filepath, compression='rb', deletesource=False):\n \"\"\"\n Given an input gz archive filepath, extracts the files.\n Required: filepath -- the path to the tar archive\n Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive\n compression -- the type of compression used in the archive; DEFAULT is 'r'; use \"r:gz\" for gzipped archives\n deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false\n Output: filelist -- the list of all extract files\n \"\"\"\n\n import gzip\n\n with gzip.open(filepath, compression) as f:\n outF = open(filepath[:-3], 'wb')\n outF.write(f.read())\n f.close()\n outF.close()\n if deletesource:\n try:\n os.remove(filepath)\n except:\n raise Exception(\"Could not delete gz archive {0}.\".format(filepath))\n\n return filepath[:-3]\n\n\ndef replace_hdr_file(hdrfile):\n \"\"\"\n Replace the .hdr file for a .bil raster with the correct data for Arc processing\n Required: hdrfile -- filepath for .hdr file to replace/create\n Output: None\n \"\"\"\n # hdr file replacment string\n HDRFILE_STRING = \"byteorder M\\nlayout bil\\nnbands 1\\nnbits 16\\nncols 6935\\nnrows 3351\\n\\\n ulxmap -124.729583333331703\\nulymap 52.871249516804028\\nxdim 0.00833333333\\nydim 0.00833333333\\n\"\n with open(hdrfile, 'w') as o:\n o.write(HDRFILE_STRING)\n\n\ndef get_snodas(out_dir, months='', years=''):\n \"\"\"Downloads daily SNODAS data from ftp. 
This is slow.\n\n :param out_dir: directory to store downloaded SNODAS zip files\n :param months: months desired for download\n :param years: years desired for download\n :return: saved zip files in out_dir\n\n .. note:\n Use polaris: http://nsidc.org/data/polaris/\n \"\"\"\n import ftplib\n\n if months == '':\n months = [1, 12]\n if years == '':\n years = [2000, 2015]\n\n monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n mons = [str(i).zfill(2) + \"_\" + monnames[i - 1] for i in range(months[0], months[1] + 1)]\n\n yrs = [str(i) for i in range(years[0], years[1] + 1)]\n\n for yr in yrs:\n for m in mons:\n ftp_addr = \"sidads.colorado.edu\"\n ftp = ftplib.FTP(ftp_addr)\n ftp.login()\n\n dir_path = \"pub/DATASETS/NOAA/G02158/masked/\" + yr + \"/\" + m + \"/\"\n ftp.cwd(dir_path)\n files = ftp.nlst()\n\n for f in files:\n if len(f) > 4:\n save_file = open(out_dir + \"/\" + f, 'wb')\n ftp.retrbinary(\"RETR \" + f, save_file.write)\n save_file.close()\n print(f)\n ftp.close()\n\n\ndef rename_polaris_snodas(path):\n prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',\n 'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',\n 'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}\n\n for filename in os.listdir(path):\n if filename.startswith(\"us_ssmv\"):\n code = prodcode[filename[0:17]]\n yrsrt = filename.find('TNATS') + 5\n yr = filename[yrsrt:yrsrt + 4]\n mo = filename[yrsrt + 4:yrsrt + 6]\n dy = filename[yrsrt + 6:yrsrt + 8]\n try:\n os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))\n except:\n pass\n\n\ndef snow_summary(code, scalingFactor, statistics=\"SUM\", outcellsize='1000', monthRange='', yearRange='',\n path=\"H:/GIS/SNODAS/SNWDS/\", outpath=\"H:/GIS/SNODAS.gdb/\", area=''):\n \"\"\"\n summarizes daily SNODAS data to monthly values\n\n INPUT\n -----\n code = text; prefix of dataset to 
use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'\n scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/\n statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,\n RANGE, STD, SUM, or VARIETY\n monthRange = len 2 list; begin and end month of data you wish to analyze\n yearRange = len 2 list; bengin and end year of data you wish to analyze\n path = directory where raw geoTiffs are located\n outpath = directory where final data will be stored\n\n OUTPUT\n ------\n projected and scaled monthly rasters\n\n \"\"\"\n if monthRange == '':\n months = [1, 12]\n if yearRange == '':\n years = [2000, 2015]\n\n g = {}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n if area == '':\n area = 'H:/GIS/Calc.gdb/WBD_UT'\n # arcpy.env.mask = area\n\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n\n for y in range(yearRange[0], yearRange[1] + 1): # set years converted here\n for m in range(monthRange[0], monthRange[1] + 1): # set months converted here\n g[code + str(y) + str(m).zfill(2)] = [] # this defines the dictionary key based on data type month and year\n for name in sorted(\n glob.glob(path + code + '*.tif')): # pick all tiff files from raw data folder of a data type\n rast = os.path.basename(name)\n if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:\n g[code + str(y) + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + str(y) + str(m).zfill(2)]) > 0:\n # print(g[code+str(y)+str(m).zfill(2)])\n # ifnull = 'in_memory/ifnull'\n # arcpy sa functions that summarize the daily data to monthly data\n cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,\n ignore_nodata=\"DATA\")\n div = 
Divide(cellstats, scalingFactor) # scale factor, converts to kg/m2 10 then to m 0.001\n calc = Con(div < 0.0, 0.0, div) # remove negative and null values\n ifnull = Con(IsNull(calc), 0, calc) # remove null\n # WKID 102039\n outCS = arcpy.SpatialReference(102039) # change coordinate units to m for spatial analysis\n # define save path for file\n outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]\n memoryFeature = \"in_memory/myMemoryFeature\"\n # memoryFeature = outnm\n arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,\n 'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')\n # Execute ExtractByMask to clip snodas data to Utah watersheds\n extrc = arcpy.sa.ExtractByMask(memoryFeature, area)\n extrc.save(outnm)\n print(outnm)\n arcpy.Delete_management(\"in_memory\")\n\n\ndef totalavg(code, statistics=\"MEAN\", monthRange=[1, 12], yearRange=[2003, 2016],\n path=\"H:/GIS/SNODAS/SNODASproj.gdb/\", outpath=\"H:/GIS/SNODAS/SNODASproj.gdb/\"):\n \"\"\"Summarizes daily raster data into monthly data.\n\n INPUT\n -----\n code = string with four letters represting data type to summarize (example 'BSSB')\n statistics = how data will be summarized; defaults to monthly averages; options are\n ['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']\n Most common are 'MEAN','MEDIAN', and 'SUM'\n These are inputs that will be used in the ArcPy CellStatistics function.\n See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation\n monthRange = beginning and end months of summary statistics\n yearRange = beginning and end years of summary statistics\n path = location of geodatabase of data to summarize\n outpath = location of geodatabase where output data should be stored\n OUTPUT\n ------\n summary raster(s) stored in outpath\n\n \"\"\"\n g = {}\n statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 
'MIN',\n 'MINORITY': 'MNR',\n 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}\n arcpy.env.workspace = path\n arcpy.env.overwriteOutput = True\n\n # iterate over month range set here; default is 1 to 12 (Jan to Dec)\n for m in range(monthRange[0], monthRange[1] + 1):\n\n # this defines the dictionary key based on data type, month, and year\n g[code + '0000' + str(m).zfill(2)] = []\n\n # pick all tiff files from raw data folder of a data type\n for rast in arcpy.ListRasters():\n yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here\n\n # create a list of rasters with the right code and month and year\n if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:\n g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month\n else:\n pass\n if len(g[code + '0000' + str(m).zfill(2)]) > 0:\n # arcpy sa functions that summarize the daily data to monthly data\n calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata=\"DATA\")\n calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])\n print(code + '0000' + str(m).zfill(2) + statstype[statistics])\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
11,
12,
13,
16
]
}
|
[
6,
11,
12,
13,
16
] |
#!/usr/bin/python
import sys
import random
def has_duplicates(list) :
"""Returns True if there are duplicate in list, false otherwise"""
copy = list[:]
copy.sort()
for item in range(len(list)-1):
if copy[item] == copy[item + 1]:
return True;
return False;
def gen_birthdays(n):
"""returns a list of random bdays of length n"""
list = []
for date in range(n):
list.append(random.randint(1, 365))
return list
def num_matches(students, samples):
"""generates sample bdays for number of students and returns count of how many
had matches"""
count = 0
for i in range(samples):
bday_list = gen_birthdays(students)
if has_duplicates(bday_list):
count += 1
return count
num_students = 23;
num_simulations = 10000
count = num_matches(num_students, num_simulations)
print 'Students: %d' % num_students
print 'Simulations: %d' % num_simulations
print 'Matches: %d' % count
|
normal
|
{
"blob_id": "e34e1e220c6d0fe2dc3d42caaefb04b178cdd120",
"index": 3768,
"step-1": "#!/usr/bin/python\nimport sys\nimport random\n\ndef has_duplicates(list) :\n \"\"\"Returns True if there are duplicate in list, false otherwise\"\"\"\n copy = list[:]\n copy.sort()\n for item in range(len(list)-1):\n if copy[item] == copy[item + 1]:\n return True;\n return False;\n\ndef gen_birthdays(n):\n \"\"\"returns a list of random bdays of length n\"\"\"\n list = []\n for date in range(n):\n list.append(random.randint(1, 365))\n return list\n\ndef num_matches(students, samples):\n \"\"\"generates sample bdays for number of students and returns count of how many\n had matches\"\"\"\n count = 0\n for i in range(samples):\n bday_list = gen_birthdays(students)\n if has_duplicates(bday_list):\n count += 1\n return count\n\nnum_students = 23;\nnum_simulations = 10000\ncount = num_matches(num_students, num_simulations)\n\nprint 'Students: %d' % num_students\nprint 'Simulations: %d' % num_simulations\nprint 'Matches: %d' % count\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from channels.routing import route
from .consumers import message_consumer
channel_routing = [
route("slack.rtm.message", message_consumer)
]
|
normal
|
{
"blob_id": "8439972b4458ba66d98f6a80a82a35576df472a4",
"index": 8096,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nchannel_routing = [route('slack.rtm.message', message_consumer)]\n",
"step-3": "from channels.routing import route\nfrom .consumers import message_consumer\nchannel_routing = [route('slack.rtm.message', message_consumer)]\n",
"step-4": "from channels.routing import route\nfrom .consumers import message_consumer\n\nchannel_routing = [\n route(\"slack.rtm.message\", message_consumer)\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class UserRegistrationForm(forms.Form):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'],
self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user, activation_key=
activation_key, key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
business_name = forms.CharField(required=True, max_length=60)
primary_contact_first_name = forms.CharField(required=True, max_length=30)
primary_contact_last_name = forms.CharField(required=True, max_length=30)
primary_contact_phone = forms.CharField(required=True, max_length=30)
primary_contact_email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get(
'primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data[
'primary_contact_email'], self.cleaned_data[
'primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[
'business_name'])
new_profile.save()
return new_user
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserRegistrationForm(forms.Form):
first_name = forms.CharField(required=True, max_length=30)
last_name = forms.CharField(required=True, max_length=30)
email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=
UserProfile.ACCOUNT_ORGANIZATION):
raise forms.ValidationError('Invalid account type')
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'],
self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user, activation_key=
activation_key, key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
business_name = forms.CharField(required=True, max_length=60)
primary_contact_first_name = forms.CharField(required=True, max_length=30)
primary_contact_last_name = forms.CharField(required=True, max_length=30)
primary_contact_phone = forms.CharField(required=True, max_length=30)
primary_contact_email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get(
'primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data[
'primary_contact_email'], self.cleaned_data[
'primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[
'business_name'])
new_profile.save()
return new_user
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MIN_PASSWORD_LENGTH = 8
MAX_PASSWORD_LENGTH = 30
class UserRegistrationForm(forms.Form):
first_name = forms.CharField(required=True, max_length=30)
last_name = forms.CharField(required=True, max_length=30)
email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=
UserProfile.ACCOUNT_ORGANIZATION):
raise forms.ValidationError('Invalid account type')
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'],
self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user, activation_key=
activation_key, key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
business_name = forms.CharField(required=True, max_length=60)
primary_contact_first_name = forms.CharField(required=True, max_length=30)
primary_contact_last_name = forms.CharField(required=True, max_length=30)
primary_contact_phone = forms.CharField(required=True, max_length=30)
primary_contact_email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get(
'primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data[
'primary_contact_email'], self.cleaned_data[
'primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[
'business_name'])
new_profile.save()
return new_user
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.models import User
from ServicePad.apps.account.models import UserProfile
import hashlib, random, datetime
from ServicePad.apps.registration.models import ActivationKey
MIN_PASSWORD_LENGTH = 8
MAX_PASSWORD_LENGTH = 30
class UserRegistrationForm(forms.Form):
first_name = forms.CharField(required=True, max_length=30)
last_name = forms.CharField(required=True, max_length=30)
email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=
UserProfile.ACCOUNT_ORGANIZATION):
raise forms.ValidationError('Invalid account type')
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'],
self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user, activation_key=
activation_key, key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
business_name = forms.CharField(required=True, max_length=60)
primary_contact_first_name = forms.CharField(required=True, max_length=30)
primary_contact_last_name = forms.CharField(required=True, max_length=30)
primary_contact_phone = forms.CharField(required=True, max_length=30)
primary_contact_email = forms.EmailField(required=True, max_length=30)
password = forms.CharField(widget=forms.PasswordInput, min_length=
MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,
min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(), initial=
UserProfile.ACCOUNT_ORGANIZATION)
def clean(self):
cleaned_data = self.cleaned_data
try:
User.objects.get(username__exact=cleaned_data.get(
'primary_contact_email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError('Email already exists')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError('Passwords do not match')
del cleaned_data['password']
del cleaned_data['confirm_password']
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data[
'primary_contact_email'], self.cleaned_data[
'primary_contact_email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['primary_contact_first_name']
new_user.last_name = self.cleaned_data['primary_contact_last_name']
new_user.is_active = False
new_user.save()
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username
).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
new_profile = UserProfile(user=new_user, account_type=UserProfile.
ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[
'business_name'])
new_profile.save()
return new_user
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.models import User
from ServicePad.apps.account.models import UserProfile
import hashlib, random, datetime
from ServicePad.apps.registration.models import ActivationKey
MIN_PASSWORD_LENGTH=8
MAX_PASSWORD_LENGTH=30
class UserRegistrationForm(forms.Form):
first_name = forms.CharField(required=True,max_length=30)
last_name = forms.CharField(required=True,max_length=30)
email = forms.EmailField(required=True,max_length=30)
password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)
form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)
def clean(self):
cleaned_data = self.cleaned_data
#Verify usernames
try:
User.objects.get(username__exact=cleaned_data.get('email'))
except User.DoesNotExist:
pass
else:
raise forms.ValidationError("Email already exists")
#Verify Passwords
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if password != confirm_password:
raise forms.ValidationError("Passwords do not match")
del cleaned_data['password']
del cleaned_data['confirm_password']
account_type = int(cleaned_data.get('form_type'))
if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:
raise forms.ValidationError("Invalid account type")
return cleaned_data
def save(self):
new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))
new_user.first_name = self.cleaned_data['first_name']
new_user.last_name = self.cleaned_data['last_name']
new_user.is_active = False
new_user.save()
#create the activation key
salt = str(random.random())
hash_salt = hashlib.sha224(salt).hexdigest()
activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)
key_obj.save()
new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)
new_profile.save()
return new_user
class OrganizationRegistrationForm(forms.Form):
    """Registration form for organization accounts.

    Mirrors UserRegistrationForm, but keys the account on the primary
    contact's email address and stores the business name on the profile.
    save() creates an inactive User, persists an ActivationKey so the
    account can later be activated, and attaches an organization
    UserProfile.
    """
    business_name = forms.CharField(required=True, max_length=60)
    primary_contact_first_name = forms.CharField(required=True, max_length=30)
    primary_contact_last_name = forms.CharField(required=True, max_length=30)
    primary_contact_phone = forms.CharField(required=True, max_length=30)
    primary_contact_email = forms.EmailField(required=True, max_length=30)
    password = forms.CharField(widget=forms.PasswordInput, min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
    confirm_password = forms.CharField(widget=forms.PasswordInput, min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)
    form_type = forms.CharField(widget=forms.HiddenInput(), initial=UserProfile.ACCOUNT_ORGANIZATION)

    def clean(self):
        """Validate email uniqueness and password agreement.

        Returns the cleaned data with ``confirm_password`` removed.
        ``password`` is intentionally kept: save() reads it via
        ``self.cleaned_data.get('password')``.
        """
        cleaned_data = self.cleaned_data
        # Verify the contact email is not already taken as a username.
        try:
            User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))
        except User.DoesNotExist:
            pass
        else:
            raise forms.ValidationError("Email already exists")
        # Verify the two password entries match.
        password = cleaned_data.get('password')
        confirm_password = cleaned_data.get('confirm_password')
        if password != confirm_password:
            raise forms.ValidationError("Passwords do not match")
        # BUGFIX: previously 'password' was also deleted here, so save()
        # received None and created the user with an unusable password.
        del cleaned_data['confirm_password']
        return cleaned_data

    def save(self):
        """Create the inactive User, its ActivationKey, and its UserProfile.

        Returns the newly created User instance.
        """
        new_user = User.objects.create_user(
            self.cleaned_data['primary_contact_email'],
            self.cleaned_data['primary_contact_email'],
            self.cleaned_data.get('password'))
        new_user.first_name = self.cleaned_data['primary_contact_first_name']
        new_user.last_name = self.cleaned_data['primary_contact_last_name']
        new_user.is_active = False  # stays inactive until the key is used
        new_user.save()
        # Create the activation key (same scheme as UserRegistrationForm).
        # NOTE(review): hashlib.sha224() on a str assumes Python 2; under
        # Python 3 the salt/username must be encoded first -- confirm.
        salt = str(random.random())
        hash_salt = hashlib.sha224(salt).hexdigest()
        activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]
        key_expires = datetime.datetime.today() + datetime.timedelta(days=1)
        # BUGFIX: persist the key. Previously it was computed but never
        # saved, so an organization account could never be activated.
        key_obj = ActivationKey(user=new_user,
                                activation_key=activation_key,
                                key_expires=key_expires)
        key_obj.save()
        new_profile = UserProfile(user=new_user,
                                  account_type=UserProfile.ACCOUNT_ORGANIZATION,
                                  business_name=self.cleaned_data['business_name'])
        new_profile.save()
        return new_user
|
flexible
|
{
"blob_id": "5f680fb21fe1090dfb58f5b9260739b91ae04d99",
"index": 9922,
"step-1": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = 
cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-2": "<mask token>\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = 
UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + 
datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-3": "<mask token>\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, 
key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n 
).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-4": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\nMIN_PASSWORD_LENGTH = 8\nMAX_PASSWORD_LENGTH = 30\n\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True, max_length=30)\n last_name = forms.CharField(required=True, max_length=30)\n email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_VOLUNTEER)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n account_type = int(cleaned_data.get('form_type'))\n if (account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type !=\n UserProfile.ACCOUNT_ORGANIZATION):\n raise forms.ValidationError('Invalid account type')\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'],\n self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = 
hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n key_obj = ActivationKey(user=new_user, activation_key=\n activation_key, key_expires=key_expires)\n key_obj.save()\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_VOLUNTEER)\n new_profile.save()\n return new_user\n\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True, max_length=60)\n primary_contact_first_name = forms.CharField(required=True, max_length=30)\n primary_contact_last_name = forms.CharField(required=True, max_length=30)\n primary_contact_phone = forms.CharField(required=True, max_length=30)\n primary_contact_email = forms.EmailField(required=True, max_length=30)\n password = forms.CharField(widget=forms.PasswordInput, min_length=\n MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,\n min_length=MIN_PASSWORD_LENGTH, max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(), initial=\n UserProfile.ACCOUNT_ORGANIZATION)\n\n def clean(self):\n cleaned_data = self.cleaned_data\n try:\n User.objects.get(username__exact=cleaned_data.get(\n 'primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError('Email already exists')\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError('Passwords do not match')\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n return cleaned_data\n\n def save(self):\n new_user = User.objects.create_user(self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data[\n 'primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['primary_contact_first_name']\n new_user.last_name = 
self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username\n ).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user, account_type=UserProfile.\n ACCOUNT_ORGANIZATION, business_name=self.cleaned_data[\n 'business_name'])\n new_profile.save()\n return new_user\n",
"step-5": "from django import forms\nfrom django.contrib.auth.models import User\nfrom ServicePad.apps.account.models import UserProfile\nimport hashlib, random, datetime\nfrom ServicePad.apps.registration.models import ActivationKey\n\nMIN_PASSWORD_LENGTH=8\nMAX_PASSWORD_LENGTH=30\n\nclass UserRegistrationForm(forms.Form):\n first_name = forms.CharField(required=True,max_length=30)\n last_name = forms.CharField(required=True,max_length=30)\n email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_VOLUNTEER)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n account_type = int(cleaned_data.get('form_type'))\n if account_type != UserProfile.ACCOUNT_VOLUNTEER and account_type != UserProfile.ACCOUNT_ORGANIZATION:\n raise forms.ValidationError(\"Invalid account type\")\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['email'], self.cleaned_data['email'], self.cleaned_data.get('password'))\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.is_active = False\n new_user.save()\n \n #create the activation key\n salt = str(random.random())\n 
hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n \n key_obj = ActivationKey(user=new_user,activation_key=activation_key,key_expires=key_expires)\n key_obj.save()\n \n new_profile = UserProfile(user=new_user,account_type=UserProfile.ACCOUNT_VOLUNTEER)\n \n new_profile.save()\n \n return new_user\n\nclass OrganizationRegistrationForm(forms.Form):\n business_name = forms.CharField(required=True,max_length=60)\n primary_contact_first_name = forms.CharField(required=True,max_length=30)\n primary_contact_last_name = forms.CharField(required=True,max_length=30)\n primary_contact_phone = forms.CharField(required=True,max_length=30)\n primary_contact_email = forms.EmailField(required=True,max_length=30)\n password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n confirm_password = forms.CharField(widget=forms.PasswordInput,min_length=MIN_PASSWORD_LENGTH,max_length=MAX_PASSWORD_LENGTH)\n form_type = forms.CharField(widget=forms.HiddenInput(),initial=UserProfile.ACCOUNT_ORGANIZATION)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n #Verify usernames\n try:\n User.objects.get(username__exact=cleaned_data.get('primary_contact_email'))\n except User.DoesNotExist:\n pass\n else:\n raise forms.ValidationError(\"Email already exists\")\n \n #Verify Passwords\n password = cleaned_data.get('password')\n confirm_password = cleaned_data.get('confirm_password')\n if password != confirm_password:\n raise forms.ValidationError(\"Passwords do not match\")\n del cleaned_data['password']\n del cleaned_data['confirm_password']\n \n \n return cleaned_data\n \n def save(self):\n new_user = User.objects.create_user(self.cleaned_data['primary_contact_email'], self.cleaned_data['primary_contact_email'], self.cleaned_data.get('password'))\n new_user.first_name = 
self.cleaned_data['primary_contact_first_name']\n new_user.last_name = self.cleaned_data['primary_contact_last_name']\n new_user.is_active = False\n new_user.save()\n \n salt = str(random.random())\n hash_salt = hashlib.sha224(salt).hexdigest()\n activation_key = hashlib.sha224(hash_salt + new_user.username).hexdigest()[:32]\n key_expires = datetime.datetime.today() + datetime.timedelta(days=1)\n new_profile = UserProfile(user=new_user,\n account_type=UserProfile.ACCOUNT_ORGANIZATION,\n business_name=self.cleaned_data['business_name']\n )\n \n new_profile.save()\n \n return new_user\n\n ",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
<|reserved_special_token_0|>
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
<|reserved_special_token_0|>
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculated_weights(x):
return sum(cs < x)
<|reserved_special_token_0|>
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
<|reserved_special_token_0|>
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
<|reserved_special_token_0|>
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
<|reserved_special_token_0|>
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
<|reserved_special_token_0|>
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x = np.random.choice(2, 200, p=[0.1, 0.9])
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectroized_calculated_weights = np.vectorize(calculated_weights)
x = vectroized_calculated_weights(x)
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]
)
fig, ax = plt.subplots(2)
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
x = np.random.choice(2, 10000, p=[0.99, 0.01])
y0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])
y1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
):
x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -
prob_neg_given_no_cond])
y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,
prob_pos_given_cond])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
return len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
prob_neg_given_no_cond):
return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *
prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import matplotlib.pyplot as plt
x = np.random.choice(2, 200, p=[0.1, 0.9])
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
return sum(cs < x)
vectroized_calculated_weights = np.vectorize(calculated_weights)
x = vectroized_calculated_weights(x)
len(x[x == 1]) / len(x)
def t200():
return np.random.choice(2, 200, p=[0.1, 0.9])
y1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel('frequency')
def t1000():
return np.random.choice(2, 1000, p=[0.1, 0.9])
y2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]
)
fig, ax = plt.subplots(2)
fig.suptitle('Histograms for Y1 and Y2')
ax[0].hist(y1)
ax[1].hist(y2)
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
x = np.random.choice(2, 10000, p=[0.99, 0.01])
y0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])
y1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])
pos_no_meas = np.zeros(len(y0[y0 == 1]))
pos_with_meas = np.ones(len(y1[y1 == 1]))
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond
    ):
    """Estimate P(condition | positive test) by Monte Carlo simulation.

    Simulates 10,000 people whose condition status and test outcome are
    drawn from the given probabilities, then returns the fraction of
    positive tests that belong to people who actually have the condition.
    """
    # Condition status for each simulated person (1 = has the condition).
    population = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
    n_healthy = np.count_nonzero(population == 0)
    n_sick = np.count_nonzero(population == 1)
    # Test outcome for each healthy person (1 = false positive).
    healthy_tests = np.random.choice(2, n_healthy,
                                     p=[prob_neg_given_no_cond,
                                        1 - prob_neg_given_no_cond])
    # Test outcome for each sick person (1 = true positive).
    sick_tests = np.random.choice(2, n_sick,
                                  p=[1 - prob_pos_given_cond,
                                     prob_pos_given_cond])
    false_pos = np.count_nonzero(healthy_tests == 1)
    true_pos = np.count_nonzero(sick_tests == 1)
    # Among everyone who tested positive, what fraction is really sick?
    return true_pos / (false_pos + true_pos)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,
    prob_neg_given_no_cond):
    """Exact P(condition | positive test) computed with Bayes' law.

    Works elementwise on NumPy arrays as well as on scalars, since it is
    pure arithmetic.
    """
    # P(positive and sick) vs. P(positive and healthy).
    true_pos_mass = prob_pos_given_cond * prob_cond
    false_pos_mass = (1 - prob_neg_given_no_cond) * (1 - prob_cond)
    return true_pos_mass / (true_pos_mass + false_pos_mass)
x = np.linspace(0.001, 0.1, 100)
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
plt.xlabel('Probability of condition (%)')
plt.ylabel('Probability of condition if tested positive (%)')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
CST 383, measles simulation homework
# Here's a question. Suppose 1% of people have measles, that the
# test for measles if 98% accurate if you do have measles, and 98%
# accurate if you don't have measles. Then what is the probability
# that you have measles, given that you have tested positive for them?
#
# Try guessing an answer before you start on this assignment.
#
# In this homework we will use simulation to estimate the answer,
# and we'll also compute the answer using Bayes' Law. There
# are three parts below:
# 1. Warm up by simulating some coin flips.
# 2. Use simulation to answer the question above.
# 3. Use Bayes' Law to answer the question without simulation.
"""
import numpy as np
import matplotlib.pyplot as plt
# Instructions:
# Problems start with #@ and then give a number. Enter your
# Python code after each problem. Do not use any variables
# in your answer except for the ones that the problem says
# you can assume are defined.
#
# Part 1: warmup
#
#@ 1
# Simulate flipping a coin 200 times that has a 90% chance of
# landing heads. Store your result in a NumPy array x of length
# 200 that contains only 0 or 1, where 1 represents heads.
# Use np.random.choice().
# (assignment to x)
x = np.random.choice(2, 200, p=[0.1, 0.9])
#@ 2
# Repeat the problem above, but this time use np.random.sample(),
# which gives values between 0 and 1. Obviously you will need to do
# further processing to turn the output of sample() into your
# array x. This will take a little thought.
# (assignment to x)
x = np.random.sample(size=200)
weights = [0.1, 0.9]
cs = np.cumsum(weights)
def calculated_weights(x):
    # Map a uniform draw x in [0, 1) to an outcome index: count how many
    # cumulative-weight boundaries in the module-level `cs` lie below x.
    return (cs < x).sum()
vectroized_calculated_weights = np.vectorize(calculated_weights)
x = vectroized_calculated_weights(x)
#@ 3
# compute the fraction of values in array x that are 1.
# (expression)
len(x[x == 1]) / len(x)
#@ 4
# Flip the weighted coin of problem 1 200 times, compute the fraction
# of values that are 1, and repeat this entire process 100 times to
# get an array of length 100. Assign this array to variable y1.
# (assignment to y1)
def t200():
    """Simulate 200 flips of a 0/1 coin weighted 90% toward 1."""
    return np.random.choice(2, size=200, p=[0.1, 0.9])
y1 = np.array([len(t200()[t200()==1])/len(t200()) for i in range(100)])
#@ 5
# plot a histogram of y1 using matplotlib
# (produce a plot)
plt.hist(y1)
plt.title("Fraction of 1's for 200 biased coin tosses a 100 times")
plt.xlabel("Fraction of 1's in a given attempt (of 200 tosses)")
plt.ylabel("frequency")
#@ 6
# compute a NumPy array y2 that is just like y1, except that in creating y2
# we do 1000 coin flips in each experiment, not 200.
# (assignment to y2)
def t1000():
    """Run one experiment: 1000 flips of the 90%-heads coin, as 0/1 ints."""
    outcome_probs = [0.1, 0.9]  # P(tails=0), P(heads=1)
    return np.random.choice(2, 1000, p=outcome_probs)
# Same fix as for y1: one sample per experiment, fraction of 1's via the
# mean.  The original called t1000() three times per experiment (masking
# one random sample with another sample's mask), tripling the work.
y2 = np.array([t1000().mean() for _ in range(100)])
#@ 7
# plot histograms for y1 and y2, with the histogram for y1 above
# the plot for y2. Our lecture notes show how to do this; see
# the 'multiple subplots' slide. Use matplotlib. In both histograms,
# let the x axis values range from 0.85 to 0.95. Please study
# the two histograms and think about why they are different.
# Assume y1 and y2 are defined.
# (produce a plot)
# Two stacked subplots: y1 (200 flips/experiment) on top, y2 (1000) below.
fig, ax = plt.subplots(2)
fig.suptitle("Histograms for Y1 and Y2")
ax[0].hist(y1)
ax[1].hist(y2)
# Shared x-range so the spreads are directly comparable; y2's histogram
# is narrower because larger samples have less variance in the fraction.
ax[0].set_xlim([0.85, 0.95])
ax[1].set_xlim([0.85, 0.95])
#
# Part 2 - simulate the answer to the question
#
#@ 8
# Simulate the overall occurrence of measles among 10,000 people,
# based on the assumption that each person has a 0.01% chance of
# having measles.
# Compute a NumPy array x of length 10,000, where each value is
# either 0 or 1. Each of the 10,000 values should be found by
# "flipping a 0/1 coin" that is weighted 99% to 0. Approximately
# 99% of the values in x should be 0, and the others should be one.
# (assignment to x)
# NOTE(review): the text above says "0.01% chance" but p=[0.99, 0.01]
# gives a 1% chance of measles (matching the file's opening question);
# the problem text appears to have a typo.
x = np.random.choice(2, 10000, p=[0.99, 0.01])
#@ 9
# Simulate the measles test results on the people without measles,
# based on the assumption that the measles test gives the right
# answer about 95% of the time on people without measles.
# Create an array y0, which is as long as the number of 0's in
# array x, by flipping a 0/1 coin that is weighted 95% to 0.
# Assume x is defined.
# (assignment to y0)
# One test result per measles-free person; a 1 here is a FALSE positive.
y0 = np.random.choice(2, len(x[x==0]), p=[0.95, 0.05])
#@ 10
# Simulate the measles test results on the people with measles,
# based on the assumption that the measles test gives the right
# answer about 98% of the time on people with measles.
# Create an array y1, which is as long as the number of 1's in
# array x, by flipping a 0/1 coin that is weighted 98% to 1.
# Assume x is defined.
# (assignment to y1)
# One test result per infected person; a 1 here is a TRUE positive.
y1 = np.random.choice(2, len(x[x==1]), p=[0.02, 0.98])
#@ 11
# Collect the measles-free people among those who tested positive.
# Compute a vector pos_no_meas that is all 0's, and is as long as the
# number of 1's in y0.
# Assume y0 is defined.
# (assignment to pos_no_meas)
pos_no_meas = np.zeros(len(y0[y0==1]))
#@ 12
# Collect the measles-infected people among those who tested positive.
# Compute a vector pos_with_meas that is all 1's, and is as long as
# the number of 1's in y1.
# Assume y1 is defined.
# (assignment to pos_with_meas)
pos_with_meas = np.ones(len(y1[y1==1]))
#@ 13
# Collect information about all people who tested positive.
# Concatenate arrays pos_no_meas and pos_with_meas, and assign
# the result to array 'tested_pos'. A 0 in in this array means
# no measles; a 1 means measles.
# Assume pos_no_meas and pos_with_meas are defined.
# (assignment to tested_pos)
tested_pos = np.concatenate((pos_no_meas, pos_with_meas))
#@ 14
# Estimate the probability of having measles if you've tested
# positive for measles. Compute the fraction of values in
# tested_positive that are 1, and assign the result to
# variable 'p'.
# Assume tested_pos is defined.
# (assignment to p)
# NOTE(review): if nobody tested positive, len(tested_pos) == 0 and this
# raises ZeroDivisionError -- vanishingly unlikely at these probabilities.
p = len(tested_pos[tested_pos == 1]) / len(tested_pos)
#@ 15
# Package up your code into a function 'prob_cond_given_pos'. This
# function will return the probability of having a condition, based
# on certain probabilities.
# The function should have the following parameters:
# prob_cond - probability of a condition (above you used 0.01)
# prob_pos_given_cond - probability of testing positive given condition (you used 0.98)
# prob_neg_given_no_cond - probability of testing negative given no condition (you used 0.95)
# The function must return the probability of having the condition.
#
# Your function should return a slightly different value every time.
# When you run prob_cond_given_pos(0.01, 0.98, 0.95), you should get an answer
# similar to the value of p you just computed.
#
# Here is the output from tests I ran with my code:
# test 1:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(1000)]).mean()
# output: 0.8180582615720287
# test 2:
# np.array([prob_cond_given_pos(0.3, 0.8, 0.7) for i in range(1000)]).mean()
# output: 0.5334712339397902
# test 3:
# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(100)]).std()
# output: 0.00550051982001144
#
## I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
    """Estimate P(condition | positive test) by simulating 10,000 people.

    prob_cond -- base rate of the condition in the population.
    prob_pos_given_cond -- test sensitivity, P(positive | condition).
    prob_neg_given_no_cond -- test specificity, P(negative | no condition).

    Returns the fraction of simulated positive testers who truly have
    the condition.  Random, so the value varies slightly per call.
    Raises ZeroDivisionError in the (unlikely) case nobody tests positive.
    """
    # 1 = has the condition, 0 = does not.
    has_cond = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])
    n_without = int((has_cond == 0).sum())
    n_with = int((has_cond == 1).sum())
    # Test outcomes (1 = positive), drawn separately for each group.
    results_without = np.random.choice(2, n_without, p=[prob_neg_given_no_cond, 1 - prob_neg_given_no_cond])
    results_with = np.random.choice(2, n_with, p=[1 - prob_pos_given_cond, prob_pos_given_cond])
    false_positives = int((results_without == 1).sum())
    true_positives = int((results_with == 1).sum())
    return true_positives / (false_positives + true_positives)
#
# Part 3 - compute the answer using Bayes' Law
#
#@ 16
# Write a function 'prob_cond_given_pos_bayes'. This function
# will take the same parameters as prob_cond_given_pos, but will
# use Bayes' Law to compute the result.
#
# Here is some output from my code:
# test1:
# prob_cond_given_pos_bayes(0.5, 0.9, 0.8)
# output: 0.1818...
# test 2:
# prob_cond_given_pos_bayes(0.3, 0.8, 0.7)
# output: 0.5333...
#
# NOTE(review): the "0.1818..." above looks like a transcription slip --
# the formula (and the simulation's test 1) give 0.8181... for those
# arguments.
#
# I provided the function header. You should fill out the function body,
# including the return statement.
# (define a function)
def prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):
    """Return P(condition | positive test) computed with Bayes' Law.

    P(C|+) = P(+|C)P(C) / (P(+|C)P(C) + P(+|~C)P(~C)), where the
    false-positive rate P(+|~C) is 1 - prob_neg_given_no_cond.
    Pure arithmetic, so it also works elementwise on NumPy arrays.
    """
    true_pos_mass = prob_pos_given_cond * prob_cond
    false_pos_mass = (1 - prob_neg_given_no_cond) * (1 - prob_cond)
    return true_pos_mass / (true_pos_mass + false_pos_mass)
#@ 17
# How does the probability of having a condition given you
# tested positive for it change based on how rare the
# condition is?
# Produce a histogram showing the probability of having measles
# given you tested positive for measles. Compute
# prob_cond_given_pos_bayes(x, 0.98, 0.95) for x ranging
# from 0.001 to 0.10 (x is the probability of having the
# condition). Use at least 100 values of x.
# Plot the results as a scatter plot, with x on the x axis
# and probability on the y axis. Label the x and y axes
# appropriately. Use matplotlib.
# Assume function prob_cond_given_pos_bayes() is defined.
# (produce a plot)
#x = np.arange(0.001, 0.1, ((0.1-0.001)/100))
# 100 evenly spaced condition probabilities in [0.001, 0.1].
x = np.linspace(0.001, 0.1, 100)
# prob_cond_given_pos_bayes uses only arithmetic, so one call computes
# all 100 probabilities elementwise over the array x.
plt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))
# NOTE(review): the plotted values are fractions in [0, 1], not percents;
# the "(%)" in both labels is misleading.
plt.xlabel("Probability of condition (%)")
plt.ylabel("Probability of condition if tested positive (%)")
|
flexible
|
{
"blob_id": "076d9f0c14a8070993039bbda2ffe4d52c8d2273",
"index": 1512,
"step-1": "<mask token>\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\n<mask token>\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\n<mask token>\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\n<mask token>\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\n<mask token>\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\n<mask token>\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\n<mask token>\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\n<mask token>\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-3": "<mask token>\nx = np.random.choice(2, 200, p=[0.1, 0.9])\nx = np.random.sample(size=200)\nweights = [0.1, 0.9]\ncs = np.cumsum(weights)\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\nvectroized_calculated_weights = np.vectorize(calculated_weights)\nx = vectroized_calculated_weights(x)\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\ny1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\ny2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]\n )\nfig, ax = plt.subplots(2)\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\ny0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])\ny1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])\npos_no_meas = np.zeros(len(y0[y0 == 1]))\npos_with_meas = np.ones(len(y1[y1 == 1]))\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond,\n prob_neg_given_no_cond):\n return 
prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\nx = np.linspace(0.001, 0.1, 100)\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nx = np.random.choice(2, 200, p=[0.1, 0.9])\nx = np.random.sample(size=200)\nweights = [0.1, 0.9]\ncs = np.cumsum(weights)\n\n\ndef calculated_weights(x):\n return sum(cs < x)\n\n\nvectroized_calculated_weights = np.vectorize(calculated_weights)\nx = vectroized_calculated_weights(x)\nlen(x[x == 1]) / len(x)\n\n\ndef t200():\n return np.random.choice(2, 200, p=[0.1, 0.9])\n\n\ny1 = np.array([(len(t200()[t200() == 1]) / len(t200())) for i in range(100)])\nplt.hist(y1)\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\nplt.ylabel('frequency')\n\n\ndef t1000():\n return np.random.choice(2, 1000, p=[0.1, 0.9])\n\n\ny2 = np.array([(len(t1000()[t1000() == 1]) / len(t1000())) for i in range(100)]\n )\nfig, ax = plt.subplots(2)\nfig.suptitle('Histograms for Y1 and Y2')\nax[0].hist(y1)\nax[1].hist(y2)\nax[0].set_xlim([0.85, 0.95])\nax[1].set_xlim([0.85, 0.95])\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\ny0 = np.random.choice(2, len(x[x == 0]), p=[0.95, 0.05])\ny1 = np.random.choice(2, len(x[x == 1]), p=[0.02, 0.98])\npos_no_meas = np.zeros(len(y0[y0 == 1]))\npos_with_meas = np.ones(len(y1[y1 == 1]))\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond\n ):\n x = np.random.choice(2, 10000, p=[1 - prob_cond, prob_cond])\n y0 = np.random.choice(2, len(x[x == 0]), p=[prob_neg_given_no_cond, 1 -\n prob_neg_given_no_cond])\n y1 = np.random.choice(2, len(x[x == 1]), p=[1 - prob_pos_given_cond,\n prob_pos_given_cond])\n pos_no_meas = np.zeros(len(y0[y0 == 1]))\n pos_with_meas = np.ones(len(y1[y1 == 1]))\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\n\n\ndef prob_cond_given_pos_bayes(prob_cond, 
prob_pos_given_cond,\n prob_neg_given_no_cond):\n return prob_pos_given_cond * prob_cond / (prob_pos_given_cond *\n prob_cond + (1 - prob_neg_given_no_cond) * (1 - prob_cond))\n\n\nx = np.linspace(0.001, 0.1, 100)\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\nplt.xlabel('Probability of condition (%)')\nplt.ylabel('Probability of condition if tested positive (%)')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nCST 383, measles simulation homework\r\n\r\n# Here's a question. Suppose 1% of people have measles, that the\r\n# test for measles if 98% accurate if you do have measles, and 98%\r\n# accurate if you don't have measles. Then what is the probability\r\n# that you have measles, given that you have tested positive for them?\r\n#\r\n# Try guessing an answer before you start on this assignment.\r\n#\r\n# In this homework we will use simulation to estimate the answer,\r\n# and we'll also compute the answer using Bayes' Law. There\r\n# are three parts below:\r\n# 1. Warm up by simulating some coin flips.\r\n# 2. Use simulation to answer the question above.\r\n# 3. Use Bayes' Law to answer the question without simulation.\r\n\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# Instructions: \r\n# Problems start with #@ and then give a number. Enter your\r\n# Python code after each problem. Do not use any variables\r\n# in your answer except for the ones that the problem says\r\n# you can assume are defined.\r\n\r\n\r\n#\r\n# Part 1: warmup\r\n#\r\n\r\n#@ 1\r\n# Simulate flipping a coin 200 times that has a 90% chance of\r\n# landing heads. Store your result in a NumPy array x of length\r\n# 200 that contains only 0 or 1, where 1 represents heads.\r\n# Use np.random.choice(). \r\n# (assignment to x)\r\nx = np.random.choice(2, 200, p=[0.1, 0.9])\r\n\r\n#@ 2\r\n# Repeat the problem above, but this time use np.random.sample(),\r\n# which gives values between 0 and 1. Obviously you will need to do\r\n# further processing to turn the output of sample() into your\r\n# array x. 
This will take a little thought.\r\n# (assignment to x)\r\nx = np.random.sample(size=200)\r\n\r\nweights = [0.1, 0.9]\r\ncs = np.cumsum(weights)\r\n\r\ndef calculated_weights(x):\r\n return sum(cs < x)\r\n\r\nvectroized_calculated_weights = np.vectorize(calculated_weights)\r\nx = vectroized_calculated_weights(x)\r\n\r\n#@ 3\r\n# compute the fraction of values in array x that are 1.\r\n# (expression)\r\nlen(x[x == 1]) / len(x)\r\n\r\n\r\n#@ 4\r\n# Flip the weighted coin of problem 1 200 times, compute the fraction\r\n# of values that are 1, and repeat this entire process 100 times to\r\n# get an array of length 100. Assign this array to variable y1.\r\n# (assignment to y1)\r\ndef t200():\r\n return np.random.choice(2, 200, p=[0.1, 0.9])\r\n\r\ny1 = np.array([len(t200()[t200()==1])/len(t200()) for i in range(100)])\r\n\r\n#@ 5\r\n# plot a histogram of y1 using matplotlib\r\n# (produce a plot)\r\nplt.hist(y1)\r\nplt.title(\"Fraction of 1's for 200 biased coin tosses a 100 times\")\r\nplt.xlabel(\"Fraction of 1's in a given attempt (of 200 tosses)\")\r\nplt.ylabel(\"frequency\")\r\n\r\n#@ 6\r\n# compute a NumPy array y2 that is just like y1, except that in creating y2\r\n# we do 1000 coin flips in each experiment, not 200.\r\n# (assignment to y2)\r\ndef t1000():\r\n return np.random.choice(2, 1000, p=[0.1, 0.9])\r\n\r\ny2 = np.array([len(t1000()[t1000()==1])/len(t1000()) for i in range(100)])\r\n\r\n#@ 7\r\n# plot histograms for y1 and y2, with the histogram for y1 above \r\n# the plot for y2. Our lecture notes show how to do this; see\r\n# the 'multiple subplots' slide. Use matplotlib. In both histograms, \r\n# let the x axis values range from 0.85 to 0.95. 
Please study\r\n# the two histograms and think about why they are different.\r\n# Assume y1 and y2 are defined.\r\n# (produce a plot)\r\n\r\nfig, ax = plt.subplots(2)\r\nfig.suptitle(\"Histograms for Y1 and Y2\")\r\nax[0].hist(y1)\r\nax[1].hist(y2)\r\nax[0].set_xlim([0.85, 0.95])\r\nax[1].set_xlim([0.85, 0.95])\r\n\r\n#\r\n# Part 2 - simulate the answer to the question\r\n#\r\n\r\n#@ 8\r\n# Simulate the overall occurrence of measles among 10,000 people,\r\n# based on the assumption that each person has a 0.01% chance of\r\n# having measles. \r\n# Compute a NumPy array x of length 10,000, where each value is \r\n# either 0 or 1. Each of the 10,000 values should be found by \r\n# \"flipping a 0/1 coin\" that is weighted 99% to 0. Approximately \r\n# 99% of the values in x should be 0, and the others should be one.\r\n# (assignment to x)\r\nx = np.random.choice(2, 10000, p=[0.99, 0.01])\r\n\r\n#@ 9\r\n# Simulate the measles test results on the people without measles,\r\n# based on the assumption that the measles test gives the right\r\n# answer about 95% of the time on people without measles.\r\n# Create an array y0, which is as long as the number of 0's in\r\n# array x, by flipping a 0/1 coin that is weighted 95% to 0.\r\n# Assume x is defined.\r\n# (assignment to y0)\r\ny0 = np.random.choice(2, len(x[x==0]), p=[0.95, 0.05])\r\n\r\n\r\n#@ 10\r\n# Simulate the measles test results on the people with measles,\r\n# based on the assumption that the measles test gives the right\r\n# answer about 98% of the time on people with measles.\r\n# Create an array y1, which is as long as the number of 1's in\r\n# array x, by flipping a 0/1 coin that is weighted 98% to 1.\r\n# Assume x is defined.\r\n# (assignment to y1)\r\ny1 = np.random.choice(2, len(x[x==1]), p=[0.02, 0.98])\r\n\r\n\r\n#@ 11\r\n# Collect the measles-free people among those who tested positive.\r\n# Compute a vector pos_no_meas that is all 0's, and is as long as the\r\n# number of 1's in y0.\r\n# Assume y0 is 
defined.\r\n# (assignment to pos_no_meas)\r\npos_no_meas = np.zeros(len(y0[y0==1]))\r\n\r\n#@ 12\r\n# Collect the measles-infected people among those who tested positive.\r\n# Compute a vector pos_with_meas that is all 1's, and is as long as\r\n# the number of 1's in y1.\r\n# Assume y1 is defined.\r\n# (assignment to pos_with_meas)\r\npos_with_meas = np.ones(len(y1[y1==1]))\r\n\r\n#@ 13\r\n# Collect information about all people who tested positive.\r\n# Concatenate arrays pos_no_meas and pos_with_meas, and assign\r\n# the result to array 'tested_pos'. A 0 in in this array means \r\n# no measles; a 1 means measles.\r\n# Assume pos_no_meas and pos_with_meas are defined.\r\n# (assignment to tested_pos)\r\ntested_pos = np.concatenate((pos_no_meas, pos_with_meas))\r\n\r\n#@ 14\r\n# Estimate the probability of having measles if you've tested\r\n# positive for measles. Compute the fraction of values in \r\n# tested_positive that are 1, and assign the result to \r\n# variable 'p'.\r\n# Assume tested_pos is defined.\r\n# (assignment to p) \r\np = len(tested_pos[tested_pos == 1]) / len(tested_pos)\r\n\r\n\r\n#@ 15\r\n# Package up your code into a function 'prob_cond_given_pos'. 
This\r\n# function will return the probability of having a condition, based\r\n# on certain probabilities.\r\n# The function should have the following parameters:\r\n# prob_cond - probability of a condition (above you used 0.01)\r\n# prob_pos_given_cond - probability of testing positive given condition (you used 0.98)\r\n# prob_neg_given_no_cond - probability of testing negative given no condition (you used 0.95)\r\n# The function must return the probability of having the condition.\r\n#\r\n# Your function should return a slightly different value every time.\r\n# When you run prob_cond_given_pos(0.01, 0.98, 0.95), you should get an answer\r\n# similar to the value of p you just computed.\r\n#\r\n# Here is the output from tests I ran with my code:\r\n# test 1:\r\n# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(1000)]).mean()\r\n# output: 0.8180582615720287\r\n# test 2:\r\n# np.array([prob_cond_given_pos(0.3, 0.8, 0.7) for i in range(1000)]).mean()\r\n# output: 0.5334712339397902\r\n# test 3:\r\n# np.array([prob_cond_given_pos(0.5, 0.9, 0.8) for i in range(100)]).std()\r\n# output: 0.00550051982001144\r\n#\r\n## I provided the function header. You should fill out the function body,\r\n# including the return statement.\r\n# (define a function)\r\n\r\ndef prob_cond_given_pos(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):\r\n x = np.random.choice(2, 10000, p=[1-prob_cond, prob_cond])\r\n y0 = np.random.choice(2, len(x[x==0]), p=[prob_neg_given_no_cond, 1-prob_neg_given_no_cond])\r\n y1 = np.random.choice(2, len(x[x==1]), p=[1-prob_pos_given_cond, prob_pos_given_cond])\r\n pos_no_meas = np.zeros(len(y0[y0==1]))\r\n pos_with_meas = np.ones(len(y1[y1==1]))\r\n tested_pos = np.concatenate((pos_no_meas, pos_with_meas))\r\n return len(tested_pos[tested_pos == 1]) / len(tested_pos)\r\n\r\n#\r\n# Part 3 - compute the answer using Bayes' Law\r\n#\r\n\r\n#@ 16\r\n# Write a function 'prob_cond_given_pos_bayes'. 
This function\r\n# will take the same parameters as prob_cond_given_pos, but will\r\n# use Bayes' Law to compute the result.\r\n#\r\n# Here is some output from my code:\r\n# test1:\r\n# prob_cond_given_pos_bayes(0.5, 0.9, 0.8)\r\n# output: 0.1818...\r\n# test 2:\r\n# prob_cond_given_pos_bayes(0.3, 0.8, 0.7) \r\n# output: 0.5333...\r\n#\r\n# I provided the function header. You should fill out the function body,\r\n# including the return statement.\r\n# (define a function)\r\n\r\ndef prob_cond_given_pos_bayes(prob_cond, prob_pos_given_cond, prob_neg_given_no_cond):\r\n return (prob_pos_given_cond*prob_cond) / ((prob_pos_given_cond*prob_cond)+(1-prob_neg_given_no_cond)*(1-prob_cond))\r\n\r\n#@ 17\r\n# How does the probability of having a condition given you\r\n# tested positive for it change based on how rare the \r\n# condition is? \r\n# Produce a histogram showing the probability of having measles\r\n# given you tested positive for measles. Compute \r\n# prob_cond_given_pos_bayes(x, 0.98, 0.95) for x ranging\r\n# from 0.001 to 0.10 (x is the probability of having the \r\n# condition). Use at least 100 values of x.\r\n# Plot the results as a scatter plot, with x on the x axis\r\n# and probability on the y axis. Label the x and y axes\r\n# appropriately. Use matplotlib.\r\n# Assume function prob_cond_given_pos_bayes() is defined.\r\n# (produce a plot)\r\n#x = np.arange(0.001, 0.1, ((0.1-0.001)/100))\r\nx = np.linspace(0.001, 0.1, 100)\r\nplt.scatter(x, prob_cond_given_pos_bayes(x, 0.98, 0.95))\r\nplt.xlabel(\"Probability of condition (%)\")\r\nplt.ylabel(\"Probability of condition if tested positive (%)\")",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
#!/bin/env python3
"""
A tool for painting and saving Game Boy tiles.
Usage: `python3 gb-tile-painter.py`
Please see: README.md.
"""
from sys import argv, exit

# A lone help flag prints the usage text and quits.  (__doc__ is printed,
# so the module docstring above doubles as the help text -- keep them in
# sync.)
if len(argv) == 2 and argv[1] in ("--help", "-h"):
    print(__doc__)
    exit(0)

# NOTE: import deferred until after the help check (original order
# preserved; presumably so --help works even without the GUI deps).
from MainWindow import MainWindow

if __name__ == "__main__":
    MainWindow().mainloop()
|
normal
|
{
"blob_id": "c153c7a3a11a09ed645540632daec42e8905432a",
"index": 4165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(argv) == 2 and (argv[1] == '--help' or argv[1] == '-h'):\n print(__doc__)\n exit(0)\n<mask token>\nif __name__ == '__main__':\n window = MainWindow()\n window.mainloop()\n",
"step-3": "<mask token>\nfrom sys import argv, exit\nif len(argv) == 2 and (argv[1] == '--help' or argv[1] == '-h'):\n print(__doc__)\n exit(0)\nfrom MainWindow import MainWindow\nif __name__ == '__main__':\n window = MainWindow()\n window.mainloop()\n",
"step-4": "#!/bin/env python3\n\n\"\"\"\nA tool for painting and saving Game Boy tiles.\n\nUsage: `python3 gb-tile-painter.py`\n\nPlease see: README.md.\n\"\"\"\n\nfrom sys import argv, exit\n\n# If we got an argument and it is --help or -h\nif len(argv) == 2 and (argv[1] == \"--help\" or argv[1] == \"-h\"):\n print(__doc__) # Print the docstring\n exit(0) # And exit\n\nfrom MainWindow import MainWindow\n\nif __name__ == \"__main__\":\n window = MainWindow()\n window.mainloop()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.6 on 2021-07-17 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add retail-sale price and 5XL/6XL size fields to Product, and
    refresh the wholesale-discount labels and the ``sex`` choices.

    Auto-generated by ``makemigrations``; edit with care -- Django tracks
    applied migrations by app label and file name.
    """

    dependencies = [
        ('shop', '0032_product_sex'),
    ]

    operations = [
        # New fields.
        migrations.AddField(
            model_name='product',
            name='price_ret_sale',
            field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),
        ),
        migrations.AddField(
            model_name='product',
            name='size_5xl',
            field=models.IntegerField(default=0, verbose_name='5XL размер'),
        ),
        migrations.AddField(
            model_name='product',
            name='size_6xl',
            field=models.IntegerField(default=0, verbose_name='6XL размер'),
        ),
        # Label-only changes on the wholesale-discount price tiers.
        migrations.AlterField(
            model_name='product',
            name='price_opt_2',
            field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_opt_3',
            field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price_opt_4',
            field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),
        ),
        # Adds the 'Унисекс' option to the sex choices.
        migrations.AlterField(
            model_name='product',
            name='sex',
            field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),
        ),
    ]
|
normal
|
{
"blob_id": "09660cfcff7d5da0339da201cb18b6f63bec2df9",
"index": 1394,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('shop', '0032_product_sex')]\n operations = [migrations.AddField(model_name='product', name=\n 'price_ret_sale', field=models.IntegerField(default=0, verbose_name\n ='Розничная цена, с учетом скидки')), migrations.AddField(\n model_name='product', name='size_5xl', field=models.IntegerField(\n default=0, verbose_name='5XL размер')), migrations.AddField(\n model_name='product', name='size_6xl', field=models.IntegerField(\n default=0, verbose_name='6XL размер')), migrations.AlterField(\n model_name='product', name='price_opt_2', field=models.IntegerField\n (default=0, verbose_name='- 3% от 30000')), migrations.AlterField(\n model_name='product', name='price_opt_3', field=models.IntegerField\n (default=0, verbose_name='- 7% от 70000')), migrations.AlterField(\n model_name='product', name='price_opt_4', field=models.IntegerField\n (default=0, verbose_name='- 11% от 110000')), migrations.AlterField\n (model_name='product', name='sex', field=models.CharField(choices=[\n ('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (\n 'Унисекс', 'Unisex')], default='Мужское', max_length=10))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-07-17 10:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0032_product_sex'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='product',\n name='price_ret_sale',\n field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_5xl',\n field=models.IntegerField(default=0, verbose_name='5XL размер'),\n ),\n migrations.AddField(\n model_name='product',\n name='size_6xl',\n field=models.IntegerField(default=0, verbose_name='6XL размер'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_2',\n field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_3',\n field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='price_opt_4',\n field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),\n ),\n migrations.AlterField(\n model_name='product',\n name='sex',\n field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라.
[12, 17, 19, 17, 23] = 17
[26, 37, 26, 37, 91] = 26, 37
[28, 30, 32, 34, 144] = 없다
최빈값 : 자료의 값 중에서 가장 많이 나타난 값
① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다.
② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다.
"""
n_list = [[12, 17, 19, 17, 23],
[26, 37, 26, 37, 91],
[28, 30, 32, 34, 144],
[10, 10, 10, 10, 10]]
for numbers in n_list:
n_dict = {}
for n in numbers:
if n in n_dict:
n_dict[n] += 1
else:
n_dict[n] = 1
mode = []
if len(n_dict) == 1 or len(n_dict) == len(numbers):
print(numbers, '= 없다')
else:
mode_count = max(n_dict.values())
for e in n_dict.keys():
if n_dict[e] == mode_count:
mode.append(e)
print(numbers, '=', mode)
|
normal
|
{
"blob_id": "39f9341313e29a22ec5e05ce9371bf65e89c91bd",
"index": 25,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-3": "<mask token>\nn_list = [[12, 17, 19, 17, 23], [26, 37, 26, 37, 91], [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-4": "\"\"\"\n리스트에 있는 숫자들의 최빈값을 구하는 프로그램을 만들어라.\n\n[12, 17, 19, 17, 23] = 17\n[26, 37, 26, 37, 91] = 26, 37\n[28, 30, 32, 34, 144] = 없다\n\n최빈값 : 자료의 값 중에서 가장 많이 나타난 값 \n① 자료의 값이 모두 같거나 모두 다르면 최빈값은 없다.\n② 자료의 값이 모두 다를 때, 도수가 가장 큰 값이 1개 이상 있으면 그 값은 모두 최빈값이다.\n\"\"\"\n\nn_list = [[12, 17, 19, 17, 23],\n [26, 37, 26, 37, 91],\n [28, 30, 32, 34, 144],\n [10, 10, 10, 10, 10]]\n \nfor numbers in n_list:\n n_dict = {}\n for n in numbers:\n if n in n_dict:\n n_dict[n] += 1\n else:\n n_dict[n] = 1\n mode = []\n if len(n_dict) == 1 or len(n_dict) == len(numbers):\n print(numbers, '= 없다')\n else:\n mode_count = max(n_dict.values())\n for e in n_dict.keys():\n if n_dict[e] == mode_count:\n mode.append(e)\n print(numbers, '=', mode)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
"""
login.py
~~~~~~~~
木犀官网登陆API
"""
from flask import jsonify, request
from . import api
from muxiwebsite.models import User
from muxiwebsite import db
@api.route('/login/', methods=['POST'])
def login():
email = request.get_json().get("email")
pwd = request.get_json().get("password")
user = User.query.filter_by(email=email).first()
if not user:
return jsonify({}), 403
if not user.verify_password(pwd):
return jsonify({}), 400
token = user.generate_auth_token()
return jsonify ({
'token': token,
}), 200
|
normal
|
{
"blob_id": "a0dbb374f803cb05a35f823f54ef5f14eaf328b2",
"index": 3688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api.route('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get('email')\n pwd = request.get_json().get('password')\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n token = user.generate_auth_token()\n return jsonify({'token': token}), 200\n",
"step-3": "<mask token>\nfrom flask import jsonify, request\nfrom . import api\nfrom muxiwebsite.models import User\nfrom muxiwebsite import db\n\n\n@api.route('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get('email')\n pwd = request.get_json().get('password')\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n token = user.generate_auth_token()\n return jsonify({'token': token}), 200\n",
"step-4": "# coding: utf-8\n\n\"\"\"\n login.py\n ~~~~~~~~\n\n 木犀官网登陆API\n\n\"\"\"\n\nfrom flask import jsonify, request\nfrom . import api\nfrom muxiwebsite.models import User\nfrom muxiwebsite import db\n\n@api.route('/login/', methods=['POST'])\ndef login():\n email = request.get_json().get(\"email\")\n pwd = request.get_json().get(\"password\")\n\n user = User.query.filter_by(email=email).first()\n if not user:\n return jsonify({}), 403\n if not user.verify_password(pwd):\n return jsonify({}), 400\n\n token = user.generate_auth_token()\n return jsonify ({\n 'token': token,\n }), 200\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import absolute_import
from django.conf.urls import patterns, url
from sentry_plugins.jira_ac.views import JiraConfigView, \
JiraDescriptorView, JiraInstalledCallback, JiraUIWidgetView
urlpatterns = patterns(
'',
url(r'^plugin$', JiraUIWidgetView.as_view()),
url(r'^config$', JiraConfigView.as_view()),
url(r'^atlassian-connect\.json$', JiraDescriptorView.as_view()),
url(r'^installed$', JiraInstalledCallback.as_view()),
)
|
normal
|
{
"blob_id": "2440f5bc774f2e2f746a246cbb2e305965c9e576",
"index": 7788,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('^plugin$', JiraUIWidgetView.as_view()), url\n ('^config$', JiraConfigView.as_view()), url(\n '^atlassian-connect\\\\.json$', JiraDescriptorView.as_view()), url(\n '^installed$', JiraInstalledCallback.as_view()))\n",
"step-3": "from __future__ import absolute_import\nfrom django.conf.urls import patterns, url\nfrom sentry_plugins.jira_ac.views import JiraConfigView, JiraDescriptorView, JiraInstalledCallback, JiraUIWidgetView\nurlpatterns = patterns('', url('^plugin$', JiraUIWidgetView.as_view()), url\n ('^config$', JiraConfigView.as_view()), url(\n '^atlassian-connect\\\\.json$', JiraDescriptorView.as_view()), url(\n '^installed$', JiraInstalledCallback.as_view()))\n",
"step-4": "from __future__ import absolute_import\n\nfrom django.conf.urls import patterns, url\n\nfrom sentry_plugins.jira_ac.views import JiraConfigView, \\\n JiraDescriptorView, JiraInstalledCallback, JiraUIWidgetView\n\nurlpatterns = patterns(\n '',\n url(r'^plugin$', JiraUIWidgetView.as_view()),\n url(r'^config$', JiraConfigView.as_view()),\n url(r'^atlassian-connect\\.json$', JiraDescriptorView.as_view()),\n url(r'^installed$', JiraInstalledCallback.as_view()),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.version_info[0] < 3:
warnings.warn('At least Python 3.0 is required to run this program',
RuntimeWarning)
else:
print('Normal continuation')
<|reserved_special_token_1|>
import sys, warnings
if sys.version_info[0] < 3:
warnings.warn('At least Python 3.0 is required to run this program',
RuntimeWarning)
else:
print('Normal continuation')
<|reserved_special_token_1|>
import sys, warnings
if sys.version_info[0] < 3:
warnings.warn("At least Python 3.0 is required to run this program", RuntimeWarning)
else:
print('Normal continuation')
|
flexible
|
{
"blob_id": "a6d5552fa0648fcf9484a1498e4132eb80ecfc86",
"index": 2304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.version_info[0] < 3:\n warnings.warn('At least Python 3.0 is required to run this program',\n RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-3": "import sys, warnings\nif sys.version_info[0] < 3:\n warnings.warn('At least Python 3.0 is required to run this program',\n RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-4": "import sys, warnings\nif sys.version_info[0] < 3:\n warnings.warn(\"At least Python 3.0 is required to run this program\", RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import Any
from jinja2.environment import Environment
MAX_RANGE = ... # type: int
UNSAFE_FUNCTION_ATTRIBUTES = ... # type: Any
UNSAFE_METHOD_ATTRIBUTES = ... # type: Any
UNSAFE_GENERATOR_ATTRIBUTES = ... # type: Any
def safe_range(*args): ...
def unsafe(f): ...
def is_internal_attribute(obj, attr): ...
def modifies_known_mutable(obj, attr): ...
class SandboxedEnvironment(Environment):
sandboxed = ... # type: bool
default_binop_table = ... # type: Any
default_unop_table = ... # type: Any
intercepted_binops = ... # type: Any
intercepted_unops = ... # type: Any
def intercept_unop(self, operator): ...
binop_table = ... # type: Any
unop_table = ... # type: Any
def __init__(self, *args, **kwargs) -> None: ...
def is_safe_attribute(self, obj, attr, value): ...
def is_safe_callable(self, obj): ...
def call_binop(self, context, operator, left, right): ...
def call_unop(self, context, operator, arg): ...
def getitem(self, obj, argument): ...
def getattr(self, obj, attribute): ...
def unsafe_undefined(self, obj, attribute): ...
def call(__self, __context, __obj, *args, **kwargs): ...
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
def is_safe_attribute(self, obj, attr, value): ...
|
normal
|
{
"blob_id": "697f4dd640ddba0411eb6eb68e7ce079a6330670",
"index": 9837,
"step-1": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n <mask token>\n <mask token>\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n <mask token>\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-2": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n <mask token>\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n <mask token>\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-3": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n\n def call_unop(self, context, operator, arg):\n ...\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n\n def call(__self, __context, __obj, *args, **kwargs):\n ...\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-4": "<mask token>\n\n\ndef safe_range(*args):\n ...\n\n\ndef unsafe(f):\n ...\n\n\ndef is_internal_attribute(obj, attr):\n ...\n\n\n<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n sandboxed = ...\n default_binop_table = ...\n default_unop_table = ...\n intercepted_binops = ...\n intercepted_unops = ...\n\n def intercept_unop(self, operator):\n ...\n binop_table = ...\n unop_table = ...\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n\n def call_unop(self, context, operator, arg):\n ...\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n\n def call(__self, __context, __obj, *args, **kwargs):\n ...\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-5": "from typing import Any\nfrom jinja2.environment import Environment\n\nMAX_RANGE = ... # type: int\nUNSAFE_FUNCTION_ATTRIBUTES = ... # type: Any\nUNSAFE_METHOD_ATTRIBUTES = ... # type: Any\nUNSAFE_GENERATOR_ATTRIBUTES = ... # type: Any\n\ndef safe_range(*args): ...\ndef unsafe(f): ...\ndef is_internal_attribute(obj, attr): ...\ndef modifies_known_mutable(obj, attr): ...\n\nclass SandboxedEnvironment(Environment):\n sandboxed = ... # type: bool\n default_binop_table = ... # type: Any\n default_unop_table = ... # type: Any\n intercepted_binops = ... # type: Any\n intercepted_unops = ... # type: Any\n def intercept_unop(self, operator): ...\n binop_table = ... # type: Any\n unop_table = ... # type: Any\n def __init__(self, *args, **kwargs) -> None: ...\n def is_safe_attribute(self, obj, attr, value): ...\n def is_safe_callable(self, obj): ...\n def call_binop(self, context, operator, left, right): ...\n def call_unop(self, context, operator, arg): ...\n def getitem(self, obj, argument): ...\n def getattr(self, obj, attribute): ...\n def unsafe_undefined(self, obj, attribute): ...\n def call(__self, __context, __obj, *args, **kwargs): ...\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n def is_safe_attribute(self, obj, attr, value): ...\n",
"step-ids": [
10,
11,
13,
17,
21
]
}
|
[
10,
11,
13,
17,
21
] |
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Solution:
def reverseList(self, head):
prev = None
while head:
curr = head
head = head.next
curr.next = prev
prev = curr
return prev
class Solution:
def reverseList(self, head):
return self._reverse(head)
def _reverse(self, node, prev=None):
if not node:
return prev
n = node.next
node.next = prev
return self._reverse(n, node)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
def recursiveReverseList(self, node):
if node.next == None:
new_head = cur = ListNode(node.val)
return new_head, cur
new_head, cur_node = self.recursiveReverseList(node.next)
cur_node.next = ListNode(node.val)
return new_head, cur_node.next
class Solution:
def reverseList(self, head):
prev = None
while head:
curr = head
head = head.next
curr.next = prev
prev = curr
return prev
class Solution:
def reverseList(self, head):
return self._reverse(head)
def _reverse(self, node, prev=None):
if not node:
return prev
n = node.next
node.next = prev
return self._reverse(n, node)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def reverseList(self, head: ListNode) ->ListNode:
if head is None:
return head
new_h, cur_nd = self.recursiveReverseList(head)
cur_nd.next = None
return new_h
def recursiveReverseList(self, node):
if node.next == None:
new_head = cur = ListNode(node.val)
return new_head, cur
new_head, cur_node = self.recursiveReverseList(node.next)
cur_node.next = ListNode(node.val)
return new_head, cur_node.next
class Solution:
def reverseList(self, head):
prev = None
while head:
curr = head
head = head.next
curr.next = prev
prev = curr
return prev
class Solution:
def reverseList(self, head):
return self._reverse(head)
def _reverse(self, node, prev=None):
if not node:
return prev
n = node.next
node.next = prev
return self._reverse(n, node)
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
class Solution:
def reverseList(self, head: ListNode) ->ListNode:
if head is None:
return head
new_h, cur_nd = self.recursiveReverseList(head)
cur_nd.next = None
return new_h
def recursiveReverseList(self, node):
if node.next == None:
new_head = cur = ListNode(node.val)
return new_head, cur
new_head, cur_node = self.recursiveReverseList(node.next)
cur_node.next = ListNode(node.val)
return new_head, cur_node.next
class Solution:
def reverseList(self, head):
prev = None
while head:
curr = head
head = head.next
curr.next = prev
prev = curr
return prev
class Solution:
def reverseList(self, head):
return self._reverse(head)
def _reverse(self, node, prev=None):
if not node:
return prev
n = node.next
node.next = prev
return self._reverse(n, node)
<|reserved_special_token_1|>
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#######################
# Iterative solution
#######################
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if head is None:
return head
val = []
while (head):
val.append(head.val)
head = head.next
new_head = ListNode(val[-1])
pre = new_head
for i in range(len(val)-2, -1, -1):
pre.next = ListNode(val[i])
pre = pre.next
pre.next = None
return new_head
#######################
# Recursive solution
#######################
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
if head is None:
return head
new_h, cur_nd = self.recursiveReverseList(head)
cur_nd.next = None
return new_h
def recursiveReverseList(self, node):
if node.next == None:
new_head = cur = ListNode(node.val)
return new_head, cur
new_head, cur_node = self.recursiveReverseList(node.next)
cur_node.next = ListNode(node.val)
return new_head, cur_node.next
#######################
# Other's iterative solution
#######################
class Solution:
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
prev = None
while head:
curr = head
head = head.next
curr.next = prev
prev = curr
return prev
#######################
# Other's recursive solution
#######################
class Solution:
# @param {ListNode} head
# @return {ListNode}
def reverseList(self, head):
return self._reverse(head)
def _reverse(self, node, prev=None):
if not node:
return prev
n = node.next
node.next = prev
return self._reverse(n, node)
|
flexible
|
{
"blob_id": "682495fec200ddad5a68f06bb0ec24e59036e66b",
"index": 3286,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n\n\nclass Solution:\n\n def reverseList(self, head):\n prev = None\n while head:\n curr = head\n head = head.next\n curr.next = prev\n prev = curr\n return prev\n\n\nclass Solution:\n\n def reverseList(self, head):\n return self._reverse(head)\n\n def _reverse(self, node, prev=None):\n if not node:\n return prev\n n = node.next\n node.next = prev\n return self._reverse(n, node)\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def recursiveReverseList(self, node):\n if node.next == None:\n new_head = cur = ListNode(node.val)\n return new_head, cur\n new_head, cur_node = self.recursiveReverseList(node.next)\n cur_node.next = ListNode(node.val)\n return new_head, cur_node.next\n\n\nclass Solution:\n\n def reverseList(self, head):\n prev = None\n while head:\n curr = head\n head = head.next\n curr.next = prev\n prev = curr\n return prev\n\n\nclass Solution:\n\n def reverseList(self, head):\n return self._reverse(head)\n\n def _reverse(self, node, prev=None):\n if not node:\n return prev\n n = node.next\n node.next = prev\n return self._reverse(n, node)\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def reverseList(self, head: ListNode) ->ListNode:\n if head is None:\n return head\n new_h, cur_nd = self.recursiveReverseList(head)\n cur_nd.next = None\n return new_h\n\n def recursiveReverseList(self, node):\n if node.next == None:\n new_head = cur = ListNode(node.val)\n return new_head, cur\n new_head, cur_node = self.recursiveReverseList(node.next)\n cur_node.next = ListNode(node.val)\n return new_head, cur_node.next\n\n\nclass Solution:\n\n def reverseList(self, head):\n prev = None\n while head:\n curr = head\n head = head.next\n curr.next = prev\n prev = curr\n return prev\n\n\nclass Solution:\n\n def reverseList(self, head):\n return self._reverse(head)\n\n def _reverse(self, node, prev=None):\n if not node:\n return prev\n n = node.next\n node.next = prev\n return self._reverse(n, node)\n",
"step-4": "class Solution:\n <mask token>\n\n\nclass Solution:\n\n def reverseList(self, head: ListNode) ->ListNode:\n if head is None:\n return head\n new_h, cur_nd = self.recursiveReverseList(head)\n cur_nd.next = None\n return new_h\n\n def recursiveReverseList(self, node):\n if node.next == None:\n new_head = cur = ListNode(node.val)\n return new_head, cur\n new_head, cur_node = self.recursiveReverseList(node.next)\n cur_node.next = ListNode(node.val)\n return new_head, cur_node.next\n\n\nclass Solution:\n\n def reverseList(self, head):\n prev = None\n while head:\n curr = head\n head = head.next\n curr.next = prev\n prev = curr\n return prev\n\n\nclass Solution:\n\n def reverseList(self, head):\n return self._reverse(head)\n\n def _reverse(self, node, prev=None):\n if not node:\n return prev\n n = node.next\n node.next = prev\n return self._reverse(n, node)\n",
"step-5": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n#######################\n# Iterative solution\n#######################\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if head is None:\n return head\n val = []\n while (head):\n val.append(head.val)\n head = head.next\n new_head = ListNode(val[-1])\n pre = new_head\n for i in range(len(val)-2, -1, -1):\n pre.next = ListNode(val[i])\n pre = pre.next\n pre.next = None\n return new_head\n\n\n#######################\n# Recursive solution\n#######################\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if head is None:\n return head\n new_h, cur_nd = self.recursiveReverseList(head)\n cur_nd.next = None\n return new_h\n \n def recursiveReverseList(self, node):\n if node.next == None:\n new_head = cur = ListNode(node.val)\n return new_head, cur\n new_head, cur_node = self.recursiveReverseList(node.next)\n cur_node.next = ListNode(node.val)\n return new_head, cur_node.next\n\n\n#######################\n# Other's iterative solution\n#######################\nclass Solution:\n# @param {ListNode} head\n# @return {ListNode}\n def reverseList(self, head):\n prev = None\n while head:\n curr = head\n head = head.next\n curr.next = prev\n prev = curr\n return prev\n\n\n#######################\n# Other's recursive solution\n#######################\nclass Solution:\n# @param {ListNode} head\n# @return {ListNode}\n def reverseList(self, head):\n return self._reverse(head)\n\n def _reverse(self, node, prev=None):\n if not node:\n return prev\n n = node.next\n node.next = prev\n return self._reverse(n, node)\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
import os, gc, random
from time import time
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss, f1_score, accuracy_score
from collections import Counter
from IPython.display import clear_output
import torch
from transformers import (
AutoTokenizer, RobertaTokenizerFast,
BertTokenizerFast, ElectraTokenizerFast
)
def seed_everything(seed):
print(f'Set seed to {seed}.')
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def is_blackbone(n):
return n.startswith('model')
def evaluation(ytrue, y_pred, labels=[0,1,2,3]):
log = log_loss(ytrue, y_pred, labels=labels)
f1 = f1_score(ytrue, y_pred.argmax(1), average='weighted')
acc = accuracy_score(ytrue, y_pred.argmax(1))
return {'Logloss': log, 'F1': f1, 'Acc': acc}
def getTokenizer(model_config, tok_name):
return AutoTokenizer.from_pretrained(tok_name, config=model_config, add_prefix_space=False)
class EarlyStopping:
def __init__(self, patience=5, mode='max'):
self.step = 0
self.stop = False
self.score = 0
self.patience = patience
self.mode = mode
self.mult = 1 if mode=='max' else -1
def update(self, score):
if self.mult*(self.score-score) > 0:
self.step += 1
else:
self.step = 0
self.score = score
if self.step == self.patience:
self.stop = True
class Timer:
def __init__(self):
self._time = 0
self.is_stopped = False
self._start()
def _start(self):
self._time = time()
def _stop(self):
if not self.is_stopped:
self.is_stopped = True
self._time = time()-self._time
@property
def time(self):
self._stop()
return self._time
def to_string(self):
return "{:02d}:{:02d}".format(*self.m_s())
def m_s(self):
t = round(self.time)
s = t%60
m = t//60
return m,s
class Printer:
def __init__(self, fold=0):
self._print = []
self.fold = fold
def pprint(self, **kwargs):
str_log = "\r"
for key in kwargs.keys():
str_log += "{}: {} - ".format(key, kwargs[key])
print(str_log, end='')
def update(self, epoch, losses, scores, time = None):
str_log = f"⏰ {time} | " if time else ""
str_log += "Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}".format(epoch, losses['loss'][epoch], losses['val_loss'][epoch])
for metric_name, value in scores.items():
str_log += ' - {}: {:.5f}'.format(metric_name, value)
self._print.append(str_log)
def show(self):
clear_output()
print("_"*100, "\nFold ", self.fold)
for p in self._print:
print("_" * 100)
print('| '+ p)
def update_and_show(self, epoch, losses, score, time=None):
self.update(epoch, losses, score, time)
self.show()
class WorkplaceManager:
def __init__(self, seed, dirs, exts, n_fols=10):
self.seed = seed
self.dirs = dirs
self.exts = exts
self.n_folds = n_fols
self._set_workplace()
@staticmethod
def create_dir(dir):
os.makedirs(dir, exist_ok=True)
def _create_dirs(self):
print('Created {}'.format(' '.join(self.dirs)))
for d in self.dirs:
self.create_dir(d)
def _clear_dirs(self):
print('Deleted {}'.format(' '.join(self.dirs)))
self.clear([f'{d}*' for d in self.dirs])
def _clear_files(self):
print('Deleted {}'.format(' '.join(self.exts)))
self.clear([f'*{ext}' for ext in self.exts])
def clear(self, objs_name):
os.system('rm -r {}'.format(' '.join(objs_name)))
def _set_workplace(self):
seed_everything(self.seed)
if os.path.exists('models') and len(os.listdir('models/')) == self.n_folds:
self._clear_dirs()
self._clear_files()
self._create_dirs()
class CrossValLogger:
    """Aggregate per-fold evaluation artifacts into cross-validation scores.

    Reads the per-fold best eval predictions saved as
    ``{path}fold_{i}_best_eval.npy`` and the pickled sum of per-fold CV
    scores in ``oof_cv``, then reports the mean OOF CV score and the
    overall score computed on the stitched out-of-fold predictions.
    """

    def __init__(self, df, metric_name, n_folds=10, oof_cv = 'cv_score.pkl', path='evals/roberta-base/'):
        """Store the fold-annotated dataframe and scoring configuration.

        ``df`` must carry ``fold`` and ``label`` columns; the number of
        distinct folds has to match ``n_folds``.
        """
        # Fixed typo in the assertion message ("Unconsistency").
        assert df.fold.nunique() == n_folds, "Inconsistency between df.fold.nunique() and n_folds"
        self.df = df.copy()  # defensive copy: caller's frame stays untouched
        self.metric_name = metric_name
        self.path = path
        self.n_folds = n_folds
        self.oof_cv = oof_cv
        # Computed lazily by show_results(): score1 = mean OOF CV score,
        # score2 = overall score on the concatenated OOF predictions.
        self.score1, self.score2 = None, None

    def _retrieve_eval_preds(self):
        """Stitch saved per-fold eval predictions into one (n_rows, n_classes) array."""
        ph = self.path + 'fold_{}_best_eval.npy'
        shape = (self.df.shape[0], self.df.label.nunique())
        preds = np.empty(shape, dtype=np.float32)
        for fold in self.df.fold.unique():
            # Rows of this fold keep their original dataframe positions.
            index = self.df[self.df.fold == fold].index.values
            preds[index] = np.load(ph.format(fold))
        return preds

    def _load_oof_cv_score(self):
        """Return the pickled sum of per-fold CV scores from ``self.oof_cv``."""
        # The with-block closes the file; the previous explicit f.close()
        # and `score = 0` placeholder were redundant.
        with open(self.oof_cv, 'rb') as f:
            return pickle.load(f)

    def show_results(self, return_score=False):
        """Print (and optionally return) the two CV summary scores.

        The scores are computed once and cached on the instance.
        """
        if self.score1 is None:
            eval_preds = self._retrieve_eval_preds()
            self.score1 = self._load_oof_cv_score() / self.n_folds  # oof_cv_score (mean)
            self.score2 = evaluation(self.df.label.values, eval_preds,
                                     labels=self.df.label.unique())[self.metric_name]  # ovr_score
        print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1, self.score2))
        if return_score:
            return self.score1, self.score2
|
normal
|
{
"blob_id": "458124aa0d6f04268ad052f74d546b12d3f3f5f7",
"index": 8989,
"step-1": "<mask token>\n\n\nclass Timer:\n\n def __init__(self):\n self._time = 0\n self.is_stopped = False\n self._start()\n <mask token>\n <mask token>\n\n @property\n def time(self):\n self._stop()\n return self._time\n\n def to_string(self):\n return '{:02d}:{:02d}'.format(*self.m_s())\n <mask token>\n\n\nclass Printer:\n\n def __init__(self, fold=0):\n self._print = []\n self.fold = fold\n\n def pprint(self, **kwargs):\n str_log = '\\r'\n for key in kwargs.keys():\n str_log += '{}: {} - '.format(key, kwargs[key])\n print(str_log, end='')\n\n def update(self, epoch, losses, scores, time=None):\n str_log = f'⏰ {time} | ' if time else ''\n str_log += 'Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}'.format(epoch,\n losses['loss'][epoch], losses['val_loss'][epoch])\n for metric_name, value in scores.items():\n str_log += ' - {}: {:.5f}'.format(metric_name, value)\n self._print.append(str_log)\n\n def show(self):\n clear_output()\n print('_' * 100, '\\nFold ', self.fold)\n for p in self._print:\n print('_' * 100)\n print('| ' + p)\n\n def update_and_show(self, epoch, losses, score, time=None):\n self.update(epoch, losses, score, time)\n self.show()\n\n\nclass WorkplaceManager:\n\n def __init__(self, seed, dirs, exts, n_fols=10):\n self.seed = seed\n self.dirs = dirs\n self.exts = exts\n self.n_folds = n_fols\n self._set_workplace()\n\n @staticmethod\n def create_dir(dir):\n os.makedirs(dir, exist_ok=True)\n\n def _create_dirs(self):\n print('Created {}'.format(' '.join(self.dirs)))\n for d in self.dirs:\n self.create_dir(d)\n\n def _clear_dirs(self):\n print('Deleted {}'.format(' '.join(self.dirs)))\n self.clear([f'{d}*' for d in self.dirs])\n\n def _clear_files(self):\n print('Deleted {}'.format(' '.join(self.exts)))\n self.clear([f'*{ext}' for ext in self.exts])\n\n def clear(self, objs_name):\n os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')\n ) 
== self.n_folds:\n self._clear_dirs()\n self._clear_files()\n self._create_dirs()\n\n\nclass CrossValLogger:\n\n def __init__(self, df, metric_name, n_folds=10, oof_cv='cv_score.pkl',\n path='evals/roberta-base/'):\n assert df.fold.nunique(\n ) == n_folds, 'Unconsistency between df.n_folds and n_folds'\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path + 'fold_{}_best_eval.npy'\n shape = self.df.shape[0], self.df.label.nunique()\n preds = np.empty(shape, dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold == i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds\n self.score2 = evaluation(self.df.label.values, eval_preds,\n labels=self.df.label.unique())[self.metric_name]\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1,\n self.score2))\n if return_score:\n return self.score1, self.score2\n",
"step-2": "<mask token>\n\n\nclass EarlyStopping:\n\n def __init__(self, patience=5, mode='max'):\n self.step = 0\n self.stop = False\n self.score = 0\n self.patience = patience\n self.mode = mode\n self.mult = 1 if mode == 'max' else -1\n\n def update(self, score):\n if self.mult * (self.score - score) > 0:\n self.step += 1\n else:\n self.step = 0\n self.score = score\n if self.step == self.patience:\n self.stop = True\n\n\nclass Timer:\n\n def __init__(self):\n self._time = 0\n self.is_stopped = False\n self._start()\n\n def _start(self):\n self._time = time()\n\n def _stop(self):\n if not self.is_stopped:\n self.is_stopped = True\n self._time = time() - self._time\n\n @property\n def time(self):\n self._stop()\n return self._time\n\n def to_string(self):\n return '{:02d}:{:02d}'.format(*self.m_s())\n\n def m_s(self):\n t = round(self.time)\n s = t % 60\n m = t // 60\n return m, s\n\n\nclass Printer:\n\n def __init__(self, fold=0):\n self._print = []\n self.fold = fold\n\n def pprint(self, **kwargs):\n str_log = '\\r'\n for key in kwargs.keys():\n str_log += '{}: {} - '.format(key, kwargs[key])\n print(str_log, end='')\n\n def update(self, epoch, losses, scores, time=None):\n str_log = f'⏰ {time} | ' if time else ''\n str_log += 'Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}'.format(epoch,\n losses['loss'][epoch], losses['val_loss'][epoch])\n for metric_name, value in scores.items():\n str_log += ' - {}: {:.5f}'.format(metric_name, value)\n self._print.append(str_log)\n\n def show(self):\n clear_output()\n print('_' * 100, '\\nFold ', self.fold)\n for p in self._print:\n print('_' * 100)\n print('| ' + p)\n\n def update_and_show(self, epoch, losses, score, time=None):\n self.update(epoch, losses, score, time)\n self.show()\n\n\nclass WorkplaceManager:\n\n def __init__(self, seed, dirs, exts, n_fols=10):\n self.seed = seed\n self.dirs = dirs\n self.exts = exts\n self.n_folds = n_fols\n self._set_workplace()\n\n @staticmethod\n def create_dir(dir):\n os.makedirs(dir, 
exist_ok=True)\n\n def _create_dirs(self):\n print('Created {}'.format(' '.join(self.dirs)))\n for d in self.dirs:\n self.create_dir(d)\n\n def _clear_dirs(self):\n print('Deleted {}'.format(' '.join(self.dirs)))\n self.clear([f'{d}*' for d in self.dirs])\n\n def _clear_files(self):\n print('Deleted {}'.format(' '.join(self.exts)))\n self.clear([f'*{ext}' for ext in self.exts])\n\n def clear(self, objs_name):\n os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')\n ) == self.n_folds:\n self._clear_dirs()\n self._clear_files()\n self._create_dirs()\n\n\nclass CrossValLogger:\n\n def __init__(self, df, metric_name, n_folds=10, oof_cv='cv_score.pkl',\n path='evals/roberta-base/'):\n assert df.fold.nunique(\n ) == n_folds, 'Unconsistency between df.n_folds and n_folds'\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path + 'fold_{}_best_eval.npy'\n shape = self.df.shape[0], self.df.label.nunique()\n preds = np.empty(shape, dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold == i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds\n self.score2 = evaluation(self.df.label.values, eval_preds,\n labels=self.df.label.unique())[self.metric_name]\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1,\n self.score2))\n if return_score:\n return self.score1, self.score2\n",
"step-3": "<mask token>\n\n\ndef is_blackbone(n):\n return n.startswith('model')\n\n\n<mask token>\n\n\ndef getTokenizer(model_config, tok_name):\n return AutoTokenizer.from_pretrained(tok_name, config=model_config,\n add_prefix_space=False)\n\n\nclass EarlyStopping:\n\n def __init__(self, patience=5, mode='max'):\n self.step = 0\n self.stop = False\n self.score = 0\n self.patience = patience\n self.mode = mode\n self.mult = 1 if mode == 'max' else -1\n\n def update(self, score):\n if self.mult * (self.score - score) > 0:\n self.step += 1\n else:\n self.step = 0\n self.score = score\n if self.step == self.patience:\n self.stop = True\n\n\nclass Timer:\n\n def __init__(self):\n self._time = 0\n self.is_stopped = False\n self._start()\n\n def _start(self):\n self._time = time()\n\n def _stop(self):\n if not self.is_stopped:\n self.is_stopped = True\n self._time = time() - self._time\n\n @property\n def time(self):\n self._stop()\n return self._time\n\n def to_string(self):\n return '{:02d}:{:02d}'.format(*self.m_s())\n\n def m_s(self):\n t = round(self.time)\n s = t % 60\n m = t // 60\n return m, s\n\n\nclass Printer:\n\n def __init__(self, fold=0):\n self._print = []\n self.fold = fold\n\n def pprint(self, **kwargs):\n str_log = '\\r'\n for key in kwargs.keys():\n str_log += '{}: {} - '.format(key, kwargs[key])\n print(str_log, end='')\n\n def update(self, epoch, losses, scores, time=None):\n str_log = f'⏰ {time} | ' if time else ''\n str_log += 'Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}'.format(epoch,\n losses['loss'][epoch], losses['val_loss'][epoch])\n for metric_name, value in scores.items():\n str_log += ' - {}: {:.5f}'.format(metric_name, value)\n self._print.append(str_log)\n\n def show(self):\n clear_output()\n print('_' * 100, '\\nFold ', self.fold)\n for p in self._print:\n print('_' * 100)\n print('| ' + p)\n\n def update_and_show(self, epoch, losses, score, time=None):\n self.update(epoch, losses, score, time)\n self.show()\n\n\nclass 
WorkplaceManager:\n\n def __init__(self, seed, dirs, exts, n_fols=10):\n self.seed = seed\n self.dirs = dirs\n self.exts = exts\n self.n_folds = n_fols\n self._set_workplace()\n\n @staticmethod\n def create_dir(dir):\n os.makedirs(dir, exist_ok=True)\n\n def _create_dirs(self):\n print('Created {}'.format(' '.join(self.dirs)))\n for d in self.dirs:\n self.create_dir(d)\n\n def _clear_dirs(self):\n print('Deleted {}'.format(' '.join(self.dirs)))\n self.clear([f'{d}*' for d in self.dirs])\n\n def _clear_files(self):\n print('Deleted {}'.format(' '.join(self.exts)))\n self.clear([f'*{ext}' for ext in self.exts])\n\n def clear(self, objs_name):\n os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')\n ) == self.n_folds:\n self._clear_dirs()\n self._clear_files()\n self._create_dirs()\n\n\nclass CrossValLogger:\n\n def __init__(self, df, metric_name, n_folds=10, oof_cv='cv_score.pkl',\n path='evals/roberta-base/'):\n assert df.fold.nunique(\n ) == n_folds, 'Unconsistency between df.n_folds and n_folds'\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path + 'fold_{}_best_eval.npy'\n shape = self.df.shape[0], self.df.label.nunique()\n preds = np.empty(shape, dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold == i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds\n self.score2 = evaluation(self.df.label.values, 
eval_preds,\n labels=self.df.label.unique())[self.metric_name]\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1,\n self.score2))\n if return_score:\n return self.score1, self.score2\n",
"step-4": "<mask token>\n\n\ndef seed_everything(seed):\n print(f'Set seed to {seed}.')\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\ndef is_blackbone(n):\n return n.startswith('model')\n\n\ndef evaluation(ytrue, y_pred, labels=[0, 1, 2, 3]):\n log = log_loss(ytrue, y_pred, labels=labels)\n f1 = f1_score(ytrue, y_pred.argmax(1), average='weighted')\n acc = accuracy_score(ytrue, y_pred.argmax(1))\n return {'Logloss': log, 'F1': f1, 'Acc': acc}\n\n\ndef getTokenizer(model_config, tok_name):\n return AutoTokenizer.from_pretrained(tok_name, config=model_config,\n add_prefix_space=False)\n\n\nclass EarlyStopping:\n\n def __init__(self, patience=5, mode='max'):\n self.step = 0\n self.stop = False\n self.score = 0\n self.patience = patience\n self.mode = mode\n self.mult = 1 if mode == 'max' else -1\n\n def update(self, score):\n if self.mult * (self.score - score) > 0:\n self.step += 1\n else:\n self.step = 0\n self.score = score\n if self.step == self.patience:\n self.stop = True\n\n\nclass Timer:\n\n def __init__(self):\n self._time = 0\n self.is_stopped = False\n self._start()\n\n def _start(self):\n self._time = time()\n\n def _stop(self):\n if not self.is_stopped:\n self.is_stopped = True\n self._time = time() - self._time\n\n @property\n def time(self):\n self._stop()\n return self._time\n\n def to_string(self):\n return '{:02d}:{:02d}'.format(*self.m_s())\n\n def m_s(self):\n t = round(self.time)\n s = t % 60\n m = t // 60\n return m, s\n\n\nclass Printer:\n\n def __init__(self, fold=0):\n self._print = []\n self.fold = fold\n\n def pprint(self, **kwargs):\n str_log = '\\r'\n for key in kwargs.keys():\n str_log += '{}: {} - '.format(key, kwargs[key])\n print(str_log, end='')\n\n def update(self, epoch, losses, scores, time=None):\n str_log = f'⏰ {time} | ' 
if time else ''\n str_log += 'Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}'.format(epoch,\n losses['loss'][epoch], losses['val_loss'][epoch])\n for metric_name, value in scores.items():\n str_log += ' - {}: {:.5f}'.format(metric_name, value)\n self._print.append(str_log)\n\n def show(self):\n clear_output()\n print('_' * 100, '\\nFold ', self.fold)\n for p in self._print:\n print('_' * 100)\n print('| ' + p)\n\n def update_and_show(self, epoch, losses, score, time=None):\n self.update(epoch, losses, score, time)\n self.show()\n\n\nclass WorkplaceManager:\n\n def __init__(self, seed, dirs, exts, n_fols=10):\n self.seed = seed\n self.dirs = dirs\n self.exts = exts\n self.n_folds = n_fols\n self._set_workplace()\n\n @staticmethod\n def create_dir(dir):\n os.makedirs(dir, exist_ok=True)\n\n def _create_dirs(self):\n print('Created {}'.format(' '.join(self.dirs)))\n for d in self.dirs:\n self.create_dir(d)\n\n def _clear_dirs(self):\n print('Deleted {}'.format(' '.join(self.dirs)))\n self.clear([f'{d}*' for d in self.dirs])\n\n def _clear_files(self):\n print('Deleted {}'.format(' '.join(self.exts)))\n self.clear([f'*{ext}' for ext in self.exts])\n\n def clear(self, objs_name):\n os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')\n ) == self.n_folds:\n self._clear_dirs()\n self._clear_files()\n self._create_dirs()\n\n\nclass CrossValLogger:\n\n def __init__(self, df, metric_name, n_folds=10, oof_cv='cv_score.pkl',\n path='evals/roberta-base/'):\n assert df.fold.nunique(\n ) == n_folds, 'Unconsistency between df.n_folds and n_folds'\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path + 'fold_{}_best_eval.npy'\n shape = self.df.shape[0], self.df.label.nunique()\n preds = np.empty(shape, 
dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold == i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds\n self.score2 = evaluation(self.df.label.values, eval_preds,\n labels=self.df.label.unique())[self.metric_name]\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1,\n self.score2))\n if return_score:\n return self.score1, self.score2\n",
"step-5": "import os, gc, random\nfrom time import time\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import log_loss, f1_score, accuracy_score\nfrom collections import Counter\nfrom IPython.display import clear_output\nimport torch\nfrom transformers import (\n AutoTokenizer, RobertaTokenizerFast, \n BertTokenizerFast, ElectraTokenizerFast\n)\n\ndef seed_everything(seed):\n print(f'Set seed to {seed}.')\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available(): \n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\ndef is_blackbone(n):\n return n.startswith('model')\n \ndef evaluation(ytrue, y_pred, labels=[0,1,2,3]):\n log = log_loss(ytrue, y_pred, labels=labels)\n f1 = f1_score(ytrue, y_pred.argmax(1), average='weighted')\n acc = accuracy_score(ytrue, y_pred.argmax(1))\n\n return {'Logloss': log, 'F1': f1, 'Acc': acc}\n\ndef getTokenizer(model_config, tok_name):\n return AutoTokenizer.from_pretrained(tok_name, config=model_config, add_prefix_space=False)\n\nclass EarlyStopping:\n def __init__(self, patience=5, mode='max'):\n self.step = 0\n self.stop = False\n self.score = 0\n self.patience = patience\n self.mode = mode\n self.mult = 1 if mode=='max' else -1\n\n def update(self, score):\n if self.mult*(self.score-score) > 0:\n self.step += 1\n else: \n self.step = 0\n self.score = score\n \n if self.step == self.patience: \n self.stop = True\n\nclass Timer:\n def __init__(self):\n self._time = 0\n self.is_stopped = False\n self._start()\n\n def _start(self):\n self._time = time()\n\n def _stop(self):\n if not self.is_stopped:\n self.is_stopped = True\n self._time = time()-self._time\n\n @property\n def time(self):\n self._stop()\n return self._time\n\n def to_string(self):\n return \"{:02d}:{:02d}\".format(*self.m_s())\n\n def m_s(self):\n t = round(self.time)\n s = t%60\n m = t//60\n\n 
return m,s\n\n\nclass Printer:\n def __init__(self, fold=0):\n self._print = []\n self.fold = fold\n\n def pprint(self, **kwargs):\n str_log = \"\\r\"\n for key in kwargs.keys():\n str_log += \"{}: {} - \".format(key, kwargs[key])\n \n print(str_log, end='')\n\n def update(self, epoch, losses, scores, time = None):\n str_log = f\"⏰ {time} | \" if time else \"\"\n str_log += \"Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}\".format(epoch, losses['loss'][epoch], losses['val_loss'][epoch])\n for metric_name, value in scores.items():\n str_log += ' - {}: {:.5f}'.format(metric_name, value)\n\n self._print.append(str_log)\n\n def show(self):\n clear_output()\n\n print(\"_\"*100, \"\\nFold \", self.fold)\n for p in self._print:\n print(\"_\" * 100)\n print('| '+ p)\n\n def update_and_show(self, epoch, losses, score, time=None):\n self.update(epoch, losses, score, time)\n self.show()\n\n\nclass WorkplaceManager:\n def __init__(self, seed, dirs, exts, n_fols=10):\n self.seed = seed\n self.dirs = dirs\n self.exts = exts\n self.n_folds = n_fols\n\n self._set_workplace()\n\n @staticmethod\n def create_dir(dir):\n os.makedirs(dir, exist_ok=True)\n \n def _create_dirs(self):\n print('Created {}'.format(' '.join(self.dirs)))\n for d in self.dirs:\n self.create_dir(d)\n \n def _clear_dirs(self):\n print('Deleted {}'.format(' '.join(self.dirs)))\n self.clear([f'{d}*' for d in self.dirs])\n\n def _clear_files(self):\n print('Deleted {}'.format(' '.join(self.exts)))\n self.clear([f'*{ext}' for ext in self.exts])\n\n def clear(self, objs_name):\n os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')) == self.n_folds:\n self._clear_dirs()\n self._clear_files() \n self._create_dirs()\n\n\nclass CrossValLogger:\n def __init__(self, df, metric_name, n_folds=10, oof_cv = 'cv_score.pkl', path='evals/roberta-base/'):\n assert df.fold.nunique()==n_folds, \"Unconsistency between 
df.n_folds and n_folds\"\n\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path+'fold_{}_best_eval.npy'\n shape = ( self.df.shape[0], self.df.label.nunique() )\n preds = np.empty(shape, dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold==i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds #oof_cv_scores\n self.score2 = evaluation(self.df.label.values, eval_preds, labels=self.df.label.unique())[self.metric_name] #ovr_score\n\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1, self.score2))\n \n if return_score: return self.score1, self.score2\n",
"step-ids": [
23,
29,
31,
33,
35
]
}
|
[
23,
29,
31,
33,
35
] |
from django.contrib import admin
from .models import CarouselImage, Budget
# Register both models with the default ModelAdmin options so they are
# editable through the Django admin site.
admin.site.register(CarouselImage)
admin.site.register(Budget)
|
normal
|
{
"blob_id": "98fb70e1911522365292c86603481656e7b86d73",
"index": 8337,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(CarouselImage)\nadmin.site.register(Budget)\n",
"step-3": "from django.contrib import admin\nfrom .models import CarouselImage, Budget\nadmin.site.register(CarouselImage)\nadmin.site.register(Budget)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
Created on June 10 2013
@author: Eugene Shim
This unit test suite is designed to test the unitTestParser module.
At the moment, the functions of that module are too simple to really
unit test effectively
'''
#Standard library modules
import unittest
#the module being tested
import unitTestParser
class TestResultsTestSuite(unittest.TestCase):
#check the verbosity
def test_results
|
normal
|
{
"blob_id": "5d0a45b93bd7972333f5574188c65484c065e9cf",
"index": 1327,
"step-1": "'''\nCreated on June 10 2013\n\n@author: Eugene Shim\n\n This unit test suite is designed to test the unitTestParser module.\n \n \n At the moment, the functions of that module are too simple to really\n unit test effectively\n'''\n\n#Standard library modules\nimport unittest\n\n#the module being tested\nimport unitTestParser \n\n\nclass TestResultsTestSuite(unittest.TestCase):\n #check the verbosity\n def test_results\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: nothing to depend on yet.
    dependencies = [
    ]
    operations = [
        # Create the Customer table: auto PK, created/modified timestamps
        # (auto_now_add / auto_now), and a required display name.
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(verbose_name='Date de création', auto_now_add=True)),
                ('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),
                ('corporate_name', models.CharField(verbose_name='Nom', max_length=255)),
            ],
            options={
                'abstract': False,
                'ordering': ('-created',),
            },
        ),
    ]
|
normal
|
{
"blob_id": "6bc400896c004f0fdddbbd3dd73ef9aaa19eb4db",
"index": 1053,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Customer', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created', models.DateTimeField(\n verbose_name='Date de création', auto_now_add=True)), ('modified',\n models.DateTimeField(verbose_name='Date de modification', auto_now=\n True)), ('corporate_name', models.CharField(verbose_name='Nom',\n max_length=255))], options={'abstract': False, 'ordering': (\n '-created',)})]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Customer', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created', models.DateTimeField(\n verbose_name='Date de création', auto_now_add=True)), ('modified',\n models.DateTimeField(verbose_name='Date de modification', auto_now=\n True)), ('corporate_name', models.CharField(verbose_name='Nom',\n max_length=255))], options={'abstract': False, 'ordering': (\n '-created',)})]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(verbose_name='Date de création', auto_now_add=True)),\n ('modified', models.DateTimeField(verbose_name='Date de modification', auto_now=True)),\n ('corporate_name', models.CharField(verbose_name='Nom', max_length=255)),\n ],\n options={\n 'abstract': False,\n 'ordering': ('-created',),\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import urls
from django.urls import path
from genius.views import (home, Class_create, Class_Update, Class_Delete, Class_Detail, Classes, Add_name,
Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search)
# URL namespace, used for reversing, e.g. reverse('genius:home').
app_name = 'genius'
urlpatterns = [
    # Landing page.
    path('', home, name='home'),
    # Class (course) listing and CRUD.
    path('class/', Classes, name='class'),
    path('class/add-name', Add_name, name='add-name'),
    path('class/create', Class_create, name='create-class'),
    path('class/<int:id>', Class_Detail, name='detail'),
    path('class/<int:id>/edit/', Class_Update, name='update'),
    path('class/<int:id>/delete/', Class_Delete, name='delete'),
    # Student listing, CRUD, and search.
    path('stds/', Student_Main, name='stds'),
    path('stds/create', Student_Create, name='stds-new'),
    path('stds/<int:id>',Student_Detail , name='std-detail'),
    path('stds/search/',Search , name='std-search'),
    path('stds/<int:id>/edit/', Student_Update, name='std-update'),
    path('stds/<int:id>/delete/', Student_Delete, name='std-delete'),
]
|
normal
|
{
"blob_id": "fd6a32652b845b2a6d6d8934c0dde91afdddd9f3",
"index": 9046,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'genius'\nurlpatterns = [path('', home, name='home'), path('class/', Classes, name=\n 'class'), path('class/add-name', Add_name, name='add-name'), path(\n 'class/create', Class_create, name='create-class'), path(\n 'class/<int:id>', Class_Detail, name='detail'), path(\n 'class/<int:id>/edit/', Class_Update, name='update'), path(\n 'class/<int:id>/delete/', Class_Delete, name='delete'), path('stds/',\n Student_Main, name='stds'), path('stds/create', Student_Create, name=\n 'stds-new'), path('stds/<int:id>', Student_Detail, name='std-detail'),\n path('stds/search/', Search, name='std-search'), path(\n 'stds/<int:id>/edit/', Student_Update, name='std-update'), path(\n 'stds/<int:id>/delete/', Student_Delete, name='std-delete')]\n",
"step-3": "from django import urls\nfrom django.urls import path\nfrom genius.views import home, Class_create, Class_Update, Class_Delete, Class_Detail, Classes, Add_name, Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search\napp_name = 'genius'\nurlpatterns = [path('', home, name='home'), path('class/', Classes, name=\n 'class'), path('class/add-name', Add_name, name='add-name'), path(\n 'class/create', Class_create, name='create-class'), path(\n 'class/<int:id>', Class_Detail, name='detail'), path(\n 'class/<int:id>/edit/', Class_Update, name='update'), path(\n 'class/<int:id>/delete/', Class_Delete, name='delete'), path('stds/',\n Student_Main, name='stds'), path('stds/create', Student_Create, name=\n 'stds-new'), path('stds/<int:id>', Student_Detail, name='std-detail'),\n path('stds/search/', Search, name='std-search'), path(\n 'stds/<int:id>/edit/', Student_Update, name='std-update'), path(\n 'stds/<int:id>/delete/', Student_Delete, name='std-delete')]\n",
"step-4": "from django import urls\nfrom django.urls import path\nfrom genius.views import (home, Class_create, Class_Update, Class_Delete, Class_Detail, Classes, Add_name,\n Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search)\n\napp_name = 'genius'\n\nurlpatterns = [\n path('', home, name='home'),\n path('class/', Classes, name='class'),\n path('class/add-name', Add_name, name='add-name'),\n path('class/create', Class_create, name='create-class'),\n path('class/<int:id>', Class_Detail, name='detail'),\n path('class/<int:id>/edit/', Class_Update, name='update'),\n path('class/<int:id>/delete/', Class_Delete, name='delete'),\n path('stds/', Student_Main, name='stds'),\n path('stds/create', Student_Create, name='stds-new'),\n path('stds/<int:id>',Student_Detail , name='std-detail'),\n path('stds/search/',Search , name='std-search'),\n path('stds/<int:id>/edit/', Student_Update, name='std-update'),\n path('stds/<int:id>/delete/', Student_Delete, name='std-delete'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf-8
import pytest
from twitter_tunes.scripts import redis_data
from mock import patch
# (raw redis bytes, expected parsed dict) cases for parse_redis_data.
REDIS_PARSE = [
    (b"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}",
     {'trend1': 'url1', 'trend2': 'url2', 'trend3': 'url3'}),
    (b"{}", {}),
    (b"{'hello':'its me'}", {'hello': 'its me'}),
    (b"{'trends': ['trend1', 'trend2', 'trend3']}",
     {'trends': ['trend1', 'trend2', 'trend3']}),
    (b"{'bob': []}",
     {'bob': []}),
    (b"{'hello': [u'its me']}", {'hello': ['its me']}),
]
# Raw bytes a healthy redis GET would return for the trends key.
GOOD_REDIS_RETURN = b"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}"
# Sample trending-topic names as returned by the Twitter API.
TWITTER_TRENDS = ["D'Angelo Russell",
                  '#ThenItAllWentHorriblyWrong',
                  '#SELFIEFORSEB',
                  '#ILikeWhatYouHave',
                  '#DolanTwinsNewVideo',
                  '#ManateePickUpLines',
                  'Wendy Bell',
                  'Brannen Greene',
                  'Jon Lester',
                  'Alison Rapp']
# (input trend list, expected apostrophe-stripped output) cases for
# redis_parse_twitter_trends.
PARSE_LIST = [
    (["D'Angelo Russell"], ['D Angelo Russell']),
    (["B'O'B"], ['B O B']),
    (["D''Angelo Russell"], ['D Angelo Russell']),
    (["''"], [' ']),
    (["D'Angelo Russ'ell"], ['D Angelo Russ ell']),
]
@pytest.mark.parametrize('data, parsed', REDIS_PARSE)
def test_parse_redis_data(data, parsed):
    """parse_redis_data turns a raw redis byte payload into the expected dict."""
    assert redis_data.parse_redis_data(data) == parsed
def test_parse_redis_data_error():
    """parse_redis_data should reject bytes that do not hold a dict literal."""
    bad_payload = b"this is some data"
    with pytest.raises(ValueError):
        redis_data.parse_redis_data(bad_payload)
@patch('redis.from_url')
def test_get_redis_data_good_redis_key(from_url):
    """get_redis_data returns the parsed trend dict when the key holds data."""
    # redis.from_url is patched, so from_url() yields the mock client.
    mock_method = from_url().get
    mock_method.return_value = GOOD_REDIS_RETURN
    assert redis_data.get_redis_data('trends') == {'trend1': 'url1',
                                                   'trend2': 'url2',
                                                   'trend3': 'url3'}
@patch('redis.from_url')
def test_get_redis_data_bad_redis_key(from_url):
    """get_redis_data falls back to an empty dict when the key is missing."""
    # A missing key makes the client's get() return None instead of bytes.
    mock_method = from_url().get
    mock_method.return_value = None
    assert redis_data.get_redis_data('bad') == {}
@patch('redis.from_url')
def test_set_redis_data(from_url):
"""Test to see if set redis data is called."""
mock_method = from_url().set
redis_data.set_redis_data('trends', 'val')
assert mock_method.call_count == 1
@patch('redis.from_url')
def test_set_redis_data_empty(from_url):
"""Test to see if set redis data is called with empty data."""
mock_method = from_url().set
redis_data.set_redis_data('trends', {})
assert mock_method.call_count == 1
def test_set_redis_no_val():
"""Test if set data fails with no arguments."""
with pytest.raises(TypeError):
redis_data.set_redis_data('key')
@pytest.mark.parametrize('data, result', PARSE_LIST)
def test_parse_redis_twiter_trends(data, result):
"""Test trend parser to remove apostrophes from trends."""
assert redis_data.redis_parse_twitter_trends(data) == result
@patch('redis.from_url')
def test_redis_set_trends(from_url):
"""Test the redis main function."""
mock_method = from_url().set
redis_data.set_redis_trend_list(TWITTER_TRENDS)
assert mock_method.call_count == 1
|
normal
|
{
"blob_id": "7f4a5779564efde7eaf08741d00254dd4aa37569",
"index": 4218,
"step-1": "<mask token>\n\n\n@pytest.mark.parametrize('data, parsed', REDIS_PARSE)\ndef test_parse_redis_data(data, parsed):\n \"\"\"Test to see if data dict in bytes is parsed.\"\"\"\n assert redis_data.parse_redis_data(data) == parsed\n\n\ndef test_parse_redis_data_error():\n \"\"\"Test to see if parse redis raises value error if bad input.\"\"\"\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b'this is some data')\n\n\n<mask token>\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_bad_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = None\n assert redis_data.get_redis_data('bad') == {}\n\n\n<mask token>\n\n\n@patch('redis.from_url')\ndef test_set_redis_data_empty(from_url):\n \"\"\"Test to see if set redis data is called with empty data.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', {})\n assert mock_method.call_count == 1\n\n\ndef test_set_redis_no_val():\n \"\"\"Test if set data fails with no arguments.\"\"\"\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')\n\n\n@pytest.mark.parametrize('data, result', PARSE_LIST)\ndef test_parse_redis_twiter_trends(data, result):\n \"\"\"Test trend parser to remove apostrophes from trends.\"\"\"\n assert redis_data.redis_parse_twitter_trends(data) == result\n\n\n@patch('redis.from_url')\ndef test_redis_set_trends(from_url):\n \"\"\"Test the redis main function.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_trend_list(TWITTER_TRENDS)\n assert mock_method.call_count == 1\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize('data, parsed', REDIS_PARSE)\ndef test_parse_redis_data(data, parsed):\n \"\"\"Test to see if data dict in bytes is parsed.\"\"\"\n assert redis_data.parse_redis_data(data) == parsed\n\n\ndef test_parse_redis_data_error():\n \"\"\"Test to see if parse redis raises value error if bad input.\"\"\"\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b'this is some data')\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_good_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = GOOD_REDIS_RETURN\n assert redis_data.get_redis_data('trends') == {'trend1': 'url1',\n 'trend2': 'url2', 'trend3': 'url3'}\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_bad_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = None\n assert redis_data.get_redis_data('bad') == {}\n\n\n<mask token>\n\n\n@patch('redis.from_url')\ndef test_set_redis_data_empty(from_url):\n \"\"\"Test to see if set redis data is called with empty data.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', {})\n assert mock_method.call_count == 1\n\n\ndef test_set_redis_no_val():\n \"\"\"Test if set data fails with no arguments.\"\"\"\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')\n\n\n@pytest.mark.parametrize('data, result', PARSE_LIST)\ndef test_parse_redis_twiter_trends(data, result):\n \"\"\"Test trend parser to remove apostrophes from trends.\"\"\"\n assert redis_data.redis_parse_twitter_trends(data) == result\n\n\n@patch('redis.from_url')\ndef test_redis_set_trends(from_url):\n \"\"\"Test the redis main function.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_trend_list(TWITTER_TRENDS)\n assert mock_method.call_count == 1\n",
"step-3": "<mask token>\nREDIS_PARSE = [(b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\",\n {'trend1': 'url1', 'trend2': 'url2', 'trend3': 'url3'}), (b'{}', {}), (\n b\"{'hello':'its me'}\", {'hello': 'its me'}), (\n b\"{'trends': ['trend1', 'trend2', 'trend3']}\", {'trends': ['trend1',\n 'trend2', 'trend3']}), (b\"{'bob': []}\", {'bob': []}), (\n b\"{'hello': [u'its me']}\", {'hello': ['its me']})]\nGOOD_REDIS_RETURN = b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\"\nTWITTER_TRENDS = [\"D'Angelo Russell\", '#ThenItAllWentHorriblyWrong',\n '#SELFIEFORSEB', '#ILikeWhatYouHave', '#DolanTwinsNewVideo',\n '#ManateePickUpLines', 'Wendy Bell', 'Brannen Greene', 'Jon Lester',\n 'Alison Rapp']\nPARSE_LIST = [([\"D'Angelo Russell\"], ['D Angelo Russell']), ([\"B'O'B\"], [\n 'B O B']), ([\"D''Angelo Russell\"], ['D Angelo Russell']), ([\"''\"], [\n ' ']), ([\"D'Angelo Russ'ell\"], ['D Angelo Russ ell'])]\n\n\n@pytest.mark.parametrize('data, parsed', REDIS_PARSE)\ndef test_parse_redis_data(data, parsed):\n \"\"\"Test to see if data dict in bytes is parsed.\"\"\"\n assert redis_data.parse_redis_data(data) == parsed\n\n\ndef test_parse_redis_data_error():\n \"\"\"Test to see if parse redis raises value error if bad input.\"\"\"\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b'this is some data')\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_good_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = GOOD_REDIS_RETURN\n assert redis_data.get_redis_data('trends') == {'trend1': 'url1',\n 'trend2': 'url2', 'trend3': 'url3'}\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_bad_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = None\n assert redis_data.get_redis_data('bad') == {}\n\n\n@patch('redis.from_url')\ndef 
test_set_redis_data(from_url):\n \"\"\"Test to see if set redis data is called.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', 'val')\n assert mock_method.call_count == 1\n\n\n@patch('redis.from_url')\ndef test_set_redis_data_empty(from_url):\n \"\"\"Test to see if set redis data is called with empty data.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', {})\n assert mock_method.call_count == 1\n\n\ndef test_set_redis_no_val():\n \"\"\"Test if set data fails with no arguments.\"\"\"\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')\n\n\n@pytest.mark.parametrize('data, result', PARSE_LIST)\ndef test_parse_redis_twiter_trends(data, result):\n \"\"\"Test trend parser to remove apostrophes from trends.\"\"\"\n assert redis_data.redis_parse_twitter_trends(data) == result\n\n\n@patch('redis.from_url')\ndef test_redis_set_trends(from_url):\n \"\"\"Test the redis main function.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_trend_list(TWITTER_TRENDS)\n assert mock_method.call_count == 1\n",
"step-4": "import pytest\nfrom twitter_tunes.scripts import redis_data\nfrom mock import patch\nREDIS_PARSE = [(b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\",\n {'trend1': 'url1', 'trend2': 'url2', 'trend3': 'url3'}), (b'{}', {}), (\n b\"{'hello':'its me'}\", {'hello': 'its me'}), (\n b\"{'trends': ['trend1', 'trend2', 'trend3']}\", {'trends': ['trend1',\n 'trend2', 'trend3']}), (b\"{'bob': []}\", {'bob': []}), (\n b\"{'hello': [u'its me']}\", {'hello': ['its me']})]\nGOOD_REDIS_RETURN = b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\"\nTWITTER_TRENDS = [\"D'Angelo Russell\", '#ThenItAllWentHorriblyWrong',\n '#SELFIEFORSEB', '#ILikeWhatYouHave', '#DolanTwinsNewVideo',\n '#ManateePickUpLines', 'Wendy Bell', 'Brannen Greene', 'Jon Lester',\n 'Alison Rapp']\nPARSE_LIST = [([\"D'Angelo Russell\"], ['D Angelo Russell']), ([\"B'O'B\"], [\n 'B O B']), ([\"D''Angelo Russell\"], ['D Angelo Russell']), ([\"''\"], [\n ' ']), ([\"D'Angelo Russ'ell\"], ['D Angelo Russ ell'])]\n\n\n@pytest.mark.parametrize('data, parsed', REDIS_PARSE)\ndef test_parse_redis_data(data, parsed):\n \"\"\"Test to see if data dict in bytes is parsed.\"\"\"\n assert redis_data.parse_redis_data(data) == parsed\n\n\ndef test_parse_redis_data_error():\n \"\"\"Test to see if parse redis raises value error if bad input.\"\"\"\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b'this is some data')\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_good_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = GOOD_REDIS_RETURN\n assert redis_data.get_redis_data('trends') == {'trend1': 'url1',\n 'trend2': 'url2', 'trend3': 'url3'}\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_bad_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = None\n assert 
redis_data.get_redis_data('bad') == {}\n\n\n@patch('redis.from_url')\ndef test_set_redis_data(from_url):\n \"\"\"Test to see if set redis data is called.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', 'val')\n assert mock_method.call_count == 1\n\n\n@patch('redis.from_url')\ndef test_set_redis_data_empty(from_url):\n \"\"\"Test to see if set redis data is called with empty data.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', {})\n assert mock_method.call_count == 1\n\n\ndef test_set_redis_no_val():\n \"\"\"Test if set data fails with no arguments.\"\"\"\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')\n\n\n@pytest.mark.parametrize('data, result', PARSE_LIST)\ndef test_parse_redis_twiter_trends(data, result):\n \"\"\"Test trend parser to remove apostrophes from trends.\"\"\"\n assert redis_data.redis_parse_twitter_trends(data) == result\n\n\n@patch('redis.from_url')\ndef test_redis_set_trends(from_url):\n \"\"\"Test the redis main function.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_trend_list(TWITTER_TRENDS)\n assert mock_method.call_count == 1\n",
"step-5": "# coding=utf-8\nimport pytest\nfrom twitter_tunes.scripts import redis_data\nfrom mock import patch\n\n\nREDIS_PARSE = [\n (b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\",\n {'trend1': 'url1', 'trend2': 'url2', 'trend3': 'url3'}),\n (b\"{}\", {}),\n (b\"{'hello':'its me'}\", {'hello': 'its me'}),\n (b\"{'trends': ['trend1', 'trend2', 'trend3']}\",\n {'trends': ['trend1', 'trend2', 'trend3']}),\n (b\"{'bob': []}\",\n {'bob': []}),\n (b\"{'hello': [u'its me']}\", {'hello': ['its me']}),\n]\n\n\nGOOD_REDIS_RETURN = b\"{'trend3': 'url3', 'trend2': 'url2', 'trend1': 'url1'}\"\n\n\nTWITTER_TRENDS = [\"D'Angelo Russell\",\n '#ThenItAllWentHorriblyWrong',\n '#SELFIEFORSEB',\n '#ILikeWhatYouHave',\n '#DolanTwinsNewVideo',\n '#ManateePickUpLines',\n 'Wendy Bell',\n 'Brannen Greene',\n 'Jon Lester',\n 'Alison Rapp']\n\n\nPARSE_LIST = [\n ([\"D'Angelo Russell\"], ['D Angelo Russell']),\n ([\"B'O'B\"], ['B O B']),\n ([\"D''Angelo Russell\"], ['D Angelo Russell']),\n ([\"''\"], [' ']),\n ([\"D'Angelo Russ'ell\"], ['D Angelo Russ ell']),\n]\n\n\n@pytest.mark.parametrize('data, parsed', REDIS_PARSE)\ndef test_parse_redis_data(data, parsed):\n \"\"\"Test to see if data dict in bytes is parsed.\"\"\"\n assert redis_data.parse_redis_data(data) == parsed\n\n\ndef test_parse_redis_data_error():\n \"\"\"Test to see if parse redis raises value error if bad input.\"\"\"\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b\"this is some data\")\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_good_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n mock_method.return_value = GOOD_REDIS_RETURN\n assert redis_data.get_redis_data('trends') == {'trend1': 'url1',\n 'trend2': 'url2',\n 'trend3': 'url3'}\n\n\n@patch('redis.from_url')\ndef test_get_redis_data_bad_redis_key(from_url):\n \"\"\"Test to see if get redis data returns data dictionary.\"\"\"\n mock_method = from_url().get\n 
mock_method.return_value = None\n assert redis_data.get_redis_data('bad') == {}\n\n\n@patch('redis.from_url')\ndef test_set_redis_data(from_url):\n \"\"\"Test to see if set redis data is called.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', 'val')\n assert mock_method.call_count == 1\n\n\n@patch('redis.from_url')\ndef test_set_redis_data_empty(from_url):\n \"\"\"Test to see if set redis data is called with empty data.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_data('trends', {})\n assert mock_method.call_count == 1\n\n\ndef test_set_redis_no_val():\n \"\"\"Test if set data fails with no arguments.\"\"\"\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')\n\n\n@pytest.mark.parametrize('data, result', PARSE_LIST)\ndef test_parse_redis_twiter_trends(data, result):\n \"\"\"Test trend parser to remove apostrophes from trends.\"\"\"\n assert redis_data.redis_parse_twitter_trends(data) == result\n\n\n@patch('redis.from_url')\ndef test_redis_set_trends(from_url):\n \"\"\"Test the redis main function.\"\"\"\n mock_method = from_url().set\n redis_data.set_redis_trend_list(TWITTER_TRENDS)\n assert mock_method.call_count == 1\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if display is None or 'localhost' in display:
matplotlib.use('agg')
<|reserved_special_token_0|>
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
'random-proj', 'legendre', 'learned', 'learned-normalized',
'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
'tile-coding'])
parser.add_argument('--frozen-model', type=str, default='', help=
'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
2, 3], help=
'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
)
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
'once the dataset has been generated, it will be saved to a file to be loaded faster'
)
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help=
'Use the sin and cos of the angular velocity if angular velocities are used'
)
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
'do not load from cache')
<|reserved_special_token_0|>
torch.manual_seed(args.seed)
np.random.seed(args.seed)
<|reserved_special_token_0|>
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
<|reserved_special_token_0|>
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(input_size=2, unroll_length=
rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
use_lmu, order=args.lmu_order)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
<|reserved_special_token_0|>
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
if os.path.exists(cache_fname) and not args.no_cache_load:
print('Generating Train and Test Loaders from Cache')
trainloader, testloader = load_from_cache(cache_fname, batch_size=
batch_size, n_samples=n_samples)
else:
print('Generating Train and Test Loaders')
if 'tf' in args.dataset:
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
elif args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
else:
trainloader, testloader = train_test_loaders(data, n_train_samples=
n_samples, n_test_samples=n_samples, rollout_length=
rollout_length, batch_size=batch_size, encoding=args.
spatial_encoding, encoding_func=encoding_func, encoding_dim=
args.dim, train_split=args.train_split)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
velocity_inputs, train_ssp_inputs=trainloader.dataset.
ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
=testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
<|reserved_special_token_0|>
print('Testing')
with torch.no_grad():
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
print('Computing predicted locations and true locations')
for ri in range(rollout_length):
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
1], :] = dense_outputs.detach().numpy()[ri, :, :]
print(np.max(predictions))
print(np.min(predictions))
<|reserved_special_token_0|>
print(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth)
<|reserved_special_token_0|>
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
=grid_scores_60_separation_pred, grid_scores_90_separation_pred=
grid_scores_90_separation_pred, grid_scores_60_truth=
grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=
grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=
grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
grid_scores_60_dense_truth, grid_scores_90_dense_truth=
grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
matplotlib.use('agg')
<|reserved_special_token_0|>
parser = argparse.ArgumentParser(
'Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
'random-proj', 'legendre', 'learned', 'learned-normalized',
'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
'tile-coding'])
parser.add_argument('--frozen-model', type=str, default='', help=
'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
2, 3], help=
'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
)
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
'once the dataset has been generated, it will be saved to a file to be loaded faster'
)
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
help=
'Use the sin and cos of the angular velocity if angular velocities are used'
)
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
'do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
torch.manual_seed(args.seed)
np.random.seed(args.seed)
data = np.load(args.dataset)
limit_low = 0
limit_high = 2.2
res = 128
encoding_func, dim = get_encoding_function(args, limit_low=limit_low,
limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
for j, y in enumerate(ys):
heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
if args.n_hd_cells > 0:
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
if args.sin_cos_ang:
input_size = 3
else:
input_size = 2
model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
hd_encoding_func = None
model = SSPPathIntegrationModel(input_size=2, unroll_length=
rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
use_lmu, order=args.lmu_order)
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
encoding_specific = args.frozen_model
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
encoding_specific = args.hex_freq_coef
if 'tf' in args.dataset:
cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
else:
cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
spatial_encoding, args.dim, args.seed, args.n_samples, args.
n_hd_cells, encoding_specific)
if os.path.exists(cache_fname) and not args.no_cache_load:
print('Generating Train and Test Loaders from Cache')
trainloader, testloader = load_from_cache(cache_fname, batch_size=
batch_size, n_samples=n_samples)
else:
print('Generating Train and Test Loaders')
if 'tf' in args.dataset:
assert args.sin_cos_ang == 1
trainloader, testloader = tf_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
elif args.n_hd_cells > 0:
trainloader, testloader = angular_train_test_loaders(data,
n_train_samples=n_samples, n_test_samples=n_samples,
rollout_length=rollout_length, batch_size=batch_size, encoding=
args.spatial_encoding, encoding_func=encoding_func,
encoding_dim=args.dim, train_split=args.train_split, hd_dim=
args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
=args.sin_cos_ang)
else:
trainloader, testloader = train_test_loaders(data, n_train_samples=
n_samples, n_test_samples=n_samples, rollout_length=
rollout_length, batch_size=batch_size, encoding=args.
spatial_encoding, encoding_func=encoding_func, encoding_dim=
args.dim, train_split=args.train_split)
if args.allow_cache:
if not os.path.exists('dataset_cache'):
os.makedirs('dataset_cache')
np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
velocity_inputs, train_ssp_inputs=trainloader.dataset.
ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
test_velocity_inputs=testloader.dataset.velocity_inputs,
test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
=testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,
coords_range=((0, 2.2), (0, 2.2)), mask_parameters=masks_parameters)
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,
args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,
args.n_samples)
print('Testing')
with torch.no_grad():
for i, data in enumerate(testloader):
velocity_inputs, ssp_inputs, ssp_outputs = data
ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
velocity_inputs, ssp_inputs)
predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.lstm_hidden_size))
dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
model.linear_hidden_size))
assert rollout_length == ssp_pred.shape[0]
print('Computing predicted locations and true locations')
for ri in range(rollout_length):
pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
] = lstm_outputs.detach().numpy()[ri, :, :]
dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
1], :] = dense_outputs.detach().numpy()[ri, :, :]
print(np.max(predictions))
print(np.min(predictions))
(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,
grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=
latest_epoch_scorer, data_abs_xy=predictions, activations=
lstm_activations, directory='output_grid_scores', filename=fname_lstm_pred)
)
(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils
.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,
activations=lstm_activations, directory='output_grid_scores', filename=
fname_lstm_truth))
(grid_scores_60_dense_pred, grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred
) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=
predictions, activations=dense_activations, directory=
'output_grid_scores', filename=fname_dense_pred))
(grid_scores_60_dense_truth, grid_scores_90_dense_truth,
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(
scorer=latest_epoch_scorer, data_abs_xy=coords, activations=
dense_activations, directory='output_grid_scores', filename=
fname_dense_truth))
print(grid_scores_60_truth, grid_scores_90_truth,
grid_scores_60_separation_truth, grid_scores_90_separation_truth)
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,
args.n_samples)
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
=grid_scores_60_separation_pred, grid_scores_90_separation_pred=
grid_scores_90_separation_pred, grid_scores_60_truth=
grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
grid_scores_60_separation_truth=grid_scores_60_separation_truth,
grid_scores_90_separation_truth=grid_scores_90_separation_truth,
grid_scores_60_dense_pred=grid_scores_60_dense_pred,
grid_scores_90_dense_pred=grid_scores_90_dense_pred,
grid_scores_60_separation_dense_pred=
grid_scores_60_separation_dense_pred,
grid_scores_90_separation_dense_pred=
grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
grid_scores_60_dense_truth, grid_scores_90_dense_truth=
grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
grid_scores_60_separation_dense_truth,
grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
)
<|reserved_special_token_1|>
import matplotlib
import os
# Fall back to the non-interactive 'agg' backend when there is no usable
# X display (headless machine or an SSH-forwarded 'localhost' display),
# so the plotting calls below do not fail.
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
    matplotlib.use('agg')
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache
from models import SSPPathIntegrationModel
from datetime import datetime
from tensorboardX import SummaryWriter
import json
from spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v
from spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v
import matplotlib.pyplot as plt
from path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, hd_gauss_encoding_func, hex_trig_encoding_func
from ssp_navigation.utils.encodings import get_encoding_function
import grid_scoring.scores as scores
import grid_scoring.utils as utils
from path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func
# CLI for computing grid scores from a trained path-integration model.
parser = argparse.ArgumentParser(
    'Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
# Input dataset (npz), trained checkpoint, and output filename prefix.
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
# Spatial encoding scheme; presumably must match how the checkpoint was
# trained -- TODO confirm against the training script.
parser.add_argument('--spatial-encoding', type=str, default='ssp', choices=
    ['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',
    'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',
    'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',
    'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',
    'random-proj', 'legendre', 'learned', 'learned-normalized',
    'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',
    'tile-coding'])
# Encoder hyperparameters; only the ones matching --spatial-encoding are used.
parser.add_argument('--frozen-model', type=str, default='', help=
    'model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help=
    'constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help=
    'number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help=
    'number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help=
    'minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help=
    'maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help=
    'phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help=
    'projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help=
    'ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,
    2, 3], help=
    'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'
    )
# Model / training-setup parameters (seed, encoding dimension, rollout).
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help=
    'Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true', help=
    'once the dataset has been generated, it will be saved to a file to be loaded faster'
    )
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help=
    'If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
    help=
    'Use the sin and cos of the angular velocity if angular velocities are used'
    )
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help=
    'do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
# Reproducibility: seed both torch and numpy from the same CLI seed.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Trajectory dataset (npz archive produced elsewhere in the project).
data = np.load(args.dataset)
# Spatial extent of the environment and resolution of the decoding heatmap.
limit_low = 0
limit_high = 2.2
res = 128
encoding_func, dim = get_encoding_function(args, limit_low=limit_low,
    limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
# One encoding vector per (x, y) grid cell: shape (res, res, dim).
# Consumed by ssp_to_loc_v below to decode SSPs back to 2D coordinates.
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print('Generating Heatmap Vectors')
for i, x in enumerate(xs):
    for j, y in enumerate(ys):
        heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)
        # Normalize each vector to unit length before lookup.
        heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print('Heatmap Vector Generation Complete')
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
# When head-direction (HD) cells are enabled, the model additionally receives
# angular velocity -- as (sin, cos) if --sin-cos-ang, otherwise one value --
# and its output dimension grows by n_hd_cells.
if args.n_hd_cells > 0:
    hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=
        0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
    if args.sin_cos_ang:
        input_size = 3
    else:
        input_size = 2
    model = SSPPathIntegrationModel(input_size=input_size, unroll_length=
        rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.
        dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)
else:
    hd_encoding_func = None
    model = SSPPathIntegrationModel(input_size=2, unroll_length=
        rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.
        use_lmu, order=args.lmu_order)
# strict=False tolerates key mismatches between the checkpoint and this model
# configuration -- NOTE(review): confirm any mismatch is intentional, since
# silently skipped weights would go unnoticed here.
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
# Encoding-specific token baked into the dataset-cache filename so caches
# generated with different encoder hyperparameters do not collide.
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
    encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
    encoding_specific = args.frozen_model
# NOTE(review): 'pc-gauss-softmax' is not among the --spatial-encoding
# choices above, so that half of the condition is unreachable here.
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
    encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
    encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
    encoding_specific = args.hex_freq_coef
# Datasets whose path contains 'tf' get their own cache namespace.
if 'tf' in args.dataset:
    cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.
        spatial_encoding, args.dim, args.seed, args.n_samples, args.
        n_hd_cells, encoding_specific)
else:
    cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.
        spatial_encoding, args.dim, args.seed, args.n_samples, args.
        n_hd_cells, encoding_specific)
# Build train/test loaders, from the npz cache when one exists (unless
# --no-cache-load), otherwise from the raw dataset, picking the loader
# variant that matches the dataset type and HD-cell configuration.
if os.path.exists(cache_fname) and not args.no_cache_load:
    print('Generating Train and Test Loaders from Cache')
    trainloader, testloader = load_from_cache(cache_fname, batch_size=
        batch_size, n_samples=n_samples)
else:
    print('Generating Train and Test Loaders')
    if 'tf' in args.dataset:
        # The tfrecord-derived loader only supports (sin, cos) angular input.
        assert args.sin_cos_ang == 1
        trainloader, testloader = tf_train_test_loaders(data,
            n_train_samples=n_samples, n_test_samples=n_samples,
            rollout_length=rollout_length, batch_size=batch_size, encoding=
            args.spatial_encoding, encoding_func=encoding_func,
            encoding_dim=args.dim, train_split=args.train_split, hd_dim=
            args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
            =args.sin_cos_ang)
    elif args.n_hd_cells > 0:
        trainloader, testloader = angular_train_test_loaders(data,
            n_train_samples=n_samples, n_test_samples=n_samples,
            rollout_length=rollout_length, batch_size=batch_size, encoding=
            args.spatial_encoding, encoding_func=encoding_func,
            encoding_dim=args.dim, train_split=args.train_split, hd_dim=
            args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang
            =args.sin_cos_ang)
    else:
        trainloader, testloader = train_test_loaders(data, n_train_samples=
            n_samples, n_test_samples=n_samples, rollout_length=
            rollout_length, batch_size=batch_size, encoding=args.
            spatial_encoding, encoding_func=encoding_func, encoding_dim=
            args.dim, train_split=args.train_split)
    # Optionally persist the freshly generated tensors for faster reruns.
    if args.allow_cache:
        if not os.path.exists('dataset_cache'):
            os.makedirs('dataset_cache')
        np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.
            velocity_inputs, train_ssp_inputs=trainloader.dataset.
            ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,
            test_velocity_inputs=testloader.dataset.velocity_inputs,
            test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs
            =testloader.dataset.ssp_outputs)
print('Train and Test Loaders Generation Complete')
# Mask parameters for GridScorer: ten (start, end) radius pairs with a fixed
# inner value of 0.2 and outer values from 0.4 to 1.0 -- presumably annulus
# masks over the rate maps (see grid_scoring.scores).
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
# NOTE(review): zip() is a single-use iterator; assumes GridScorer
# materializes it internally -- confirm before reusing masks_parameters.
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,
    coords_range=((0, 2.2), (0, 2.2)), mask_parameters=masks_parameters)
# Output plot filenames: LSTM vs dense activations, decoded-prediction vs
# ground-truth positions.
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,
    args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,
    args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,
    args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,
    args.n_samples)
print('Testing')
with torch.no_grad():
    # The variables assigned in this loop are read after it ends, so the
    # code appears to assume the test loader yields a single batch --
    # TODO confirm; with multiple batches only the last would be scored.
    for i, data in enumerate(testloader):
        velocity_inputs, ssp_inputs, ssp_outputs = data
        ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(
            velocity_inputs, ssp_inputs)
        # Flattened (rollout_step x batch) buffers for decoded positions and
        # hidden activations.
        predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
        coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))
        lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
            model.lstm_hidden_size))
        dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],
            model.linear_hidden_size))
        assert rollout_length == ssp_pred.shape[0]
        print('Computing predicted locations and true locations')
        for ri in range(rollout_length):
            # Only the first args.dim components are position encoding; any
            # HD-cell components beyond that are trimmed off.
            pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
            predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
                ] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)
            # Ground-truth positions decoded from the target SSPs.
            coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
            coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
                ] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)
            lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :
                ] = lstm_outputs.detach().numpy()[ri, :, :]
            dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[
                1], :] = dense_outputs.detach().numpy()[ri, :, :]
# Sanity print: decoded coordinates should stay within [limit_low, limit_high].
print(np.max(predictions))
print(np.min(predictions))
# Grid scores for the four (activations x position-source) combinations:
# LSTM activations binned by predicted positions.
(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,
    grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=
    latest_epoch_scorer, data_abs_xy=predictions, activations=
    lstm_activations, directory='output_grid_scores', filename=fname_lstm_pred)
    )
# LSTM activations binned by ground-truth positions.
(grid_scores_60_truth, grid_scores_90_truth,
    grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils
    .get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,
    activations=lstm_activations, directory='output_grid_scores', filename=
    fname_lstm_truth))
# Dense-layer activations binned by predicted positions.
(grid_scores_60_dense_pred, grid_scores_90_dense_pred,
    grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred
    ) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=
    predictions, activations=dense_activations, directory=
    'output_grid_scores', filename=fname_dense_pred))
# Dense-layer activations binned by ground-truth positions.
(grid_scores_60_dense_truth, grid_scores_90_dense_truth,
    grid_scores_60_separation_dense_truth,
    grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(
    scorer=latest_epoch_scorer, data_abs_xy=coords, activations=
    dense_activations, directory='output_grid_scores', filename=
    fname_dense_truth))
print(grid_scores_60_truth, grid_scores_90_truth,
    grid_scores_60_separation_truth, grid_scores_90_separation_truth)
# Persist all score arrays so runs with different settings can be compared.
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,
    args.n_samples)
np.savez(fname, grid_scores_60_pred=grid_scores_60_pred,
    grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred
    =grid_scores_60_separation_pred, grid_scores_90_separation_pred=
    grid_scores_90_separation_pred, grid_scores_60_truth=
    grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,
    grid_scores_60_separation_truth=grid_scores_60_separation_truth,
    grid_scores_90_separation_truth=grid_scores_90_separation_truth,
    grid_scores_60_dense_pred=grid_scores_60_dense_pred,
    grid_scores_90_dense_pred=grid_scores_90_dense_pred,
    grid_scores_60_separation_dense_pred=
    grid_scores_60_separation_dense_pred,
    grid_scores_90_separation_dense_pred=
    grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=
    grid_scores_60_dense_truth, grid_scores_90_dense_truth=
    grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=
    grid_scores_60_separation_dense_truth,
    grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth
    )
<|reserved_special_token_1|>
# Compute grid scores using the new dataset format
import matplotlib
import os
# allow code to work on machines without a display or in a screen session
# ('agg' is a non-interactive backend, so plots can only be saved to file)
display = os.environ.get('DISPLAY')
if display is None or 'localhost' in display:
    matplotlib.use('agg')
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache
from models import SSPPathIntegrationModel
from datetime import datetime
from tensorboardX import SummaryWriter
import json
from spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v
from spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v
import matplotlib.pyplot as plt
from path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, \
hd_gauss_encoding_func, hex_trig_encoding_func
from ssp_navigation.utils.encodings import get_encoding_function
import grid_scoring.scores as scores
import grid_scoring.utils as utils
# from grid_scoring.run_network import run_and_gather_activations, run_and_gather_localization_activations
from path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func
# CLI for computing grid scores from a trained path-integration model.
parser = argparse.ArgumentParser('Compute grid scores for a path integration model')
parser.add_argument('--n-samples', type=int, default=5000)
parser.add_argument('--use-localization', action='store_true')
# TODO: use these parameters
# Input dataset (npz), trained checkpoint, and output filename prefix.
parser.add_argument('--dataset', type=str, default='')
parser.add_argument('--model', type=str, default='')
parser.add_argument('--fname-prefix', type=str, default='sac')
# Spatial encoding scheme; presumably must match how the checkpoint was
# trained -- TODO confirm against the training script.
parser.add_argument('--spatial-encoding', type=str, default='ssp',
                    choices=[
                        'ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp', 'orth-proj-ssp',
                        'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp', 'sub-toroid-ssp', 'var-sub-toroid-ssp',
                        'random', '2d', '2d-normalized', 'one-hot', 'hex-trig',
                        'trig', 'random-trig', 'random-rotated-trig', 'random-proj', 'legendre',
                        'learned', 'learned-normalized', 'frozen-learned', 'frozen-learned-normalized',
                        'pc-gauss', 'pc-dog', 'tile-coding'
                    ])
# choices=['ssp', '2d', 'frozen-learned', 'pc-gauss', 'pc-dog', 'pc-gauss-softmax', 'hex-trig', 'hex-trig-all-freq'])
# Encoder hyperparameters; only the ones matching --spatial-encoding are used.
parser.add_argument('--frozen-model', type=str, default='', help='model to use frozen encoding weights from')
parser.add_argument('--pc-gauss-sigma', type=float, default=0.25)
parser.add_argument('--pc-diff-sigma', type=float, default=0.5)
parser.add_argument('--hex-freq-coef', type=float, default=2.5, help='constant to scale frequencies by')
parser.add_argument('--n-tiles', type=int, default=8, help='number of layers for tile coding')
parser.add_argument('--n-bins', type=int, default=8, help='number of bins for tile coding')
parser.add_argument('--ssp-scaling', type=float, default=1.0)
parser.add_argument('--grid-ssp-min', type=float, default=0.25, help='minimum plane wave scale')
parser.add_argument('--grid-ssp-max', type=float, default=2.0, help='maximum plane wave scale')
parser.add_argument('--phi', type=float, default=0.5, help='phi as a fraction of pi for orth-proj-ssp')
parser.add_argument('--n-proj', type=int, default=3, help='projection dimension for sub toroids')
parser.add_argument('--scale-ratio', type=float, default=0, help='ratio between sub toroid scales')
parser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1, 2, 3],
                    help='pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid')
# Model / training-setup parameters (seed, encoding dimension, rollout).
parser.add_argument('--seed', type=int, default=13)
parser.add_argument('--dropout-p', type=float, default=0.5)
parser.add_argument('--dim', type=int, default=512)
parser.add_argument('--train-split', type=float, default=0.8, help='Training fraction of the train/test split')
parser.add_argument('--allow-cache', action='store_true',
                    help='once the dataset has been generated, it will be saved to a file to be loaded faster')
parser.add_argument('--trajectory-length', type=int, default=100)
parser.add_argument('--minibatch-size', type=int, default=10)
parser.add_argument('--n-image-bins', type=int, default=20)
parser.add_argument('--n-hd-cells', type=int, default=0, help='If non-zero, use linear and angular velocity as well as HD cell output')
parser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],
                    help='Use the sin and cos of the angular velocity if angular velocities are used')
parser.add_argument('--use-lmu', action='store_true')
parser.add_argument('--lmu-order', type=int, default=6)
parser.add_argument('--no-cache-load', action='store_true', help='do not load from cache')
args = parser.parse_args()
ssp_scaling = args.ssp_scaling
# Reproducibility: seed both torch and numpy from the same CLI seed.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Trajectory dataset (npz archive produced elsewhere in the project).
data = np.load(args.dataset)
# only used for frozen-learned and other custom encoding functions
# encoding_func = None
# Spatial extent of the environment and resolution of the decoding heatmap.
limit_low = 0 #* args.ssp_scaling
limit_high = 2.2 #* args.ssp_scaling
res = 128 #256
encoding_func, dim = get_encoding_function(args, limit_low=limit_low, limit_high=limit_high)
xs = np.linspace(limit_low, limit_high, res)
ys = np.linspace(limit_low, limit_high, res)
# FIXME: inefficient but will work for now
# One encoding vector per (x, y) grid cell: shape (res, res, dim).
# Consumed by ssp_to_loc_v below to decode SSPs back to 2D coordinates.
heatmap_vectors = np.zeros((len(xs), len(ys), dim))
print("Generating Heatmap Vectors")
for i, x in enumerate(xs):
    for j, y in enumerate(ys):
        # Earlier encoding_func signatures took a single (batched or
        # unbatched) np.array([x, y]); the current one takes keywords.
        heatmap_vectors[i, j, :] = encoding_func(
            x=x, y=y
        )
        # Normalize each vector to unit length before lookup.
        heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])
print("Heatmap Vector Generation Complete")
n_samples = args.n_samples
rollout_length = args.trajectory_length
batch_size = args.minibatch_size
# When head-direction (HD) cells are enabled, the model additionally receives
# angular velocity -- as (sin, cos) if --sin-cos-ang, otherwise one value --
# and its output dimension grows by n_hd_cells.
if args.n_hd_cells > 0:
    hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=0.25, use_softmax=False, rng=np.random.RandomState(args.seed))
    if args.sin_cos_ang:
        input_size = 3
    else:
        input_size = 2
    model = SSPPathIntegrationModel(
        input_size=input_size, unroll_length=rollout_length,
        sp_dim=dim + args.n_hd_cells, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order
    )
else:
    hd_encoding_func = None
    model = SSPPathIntegrationModel(
        input_size=2, unroll_length=rollout_length,
        sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order
    )
# model = SSPPathIntegrationModel(unroll_length=rollout_length, sp_dim=dim, dropout_p=args.dropout_p)
# strict=False tolerates key mismatches between the checkpoint and this model
# configuration -- NOTE(review): confirm any mismatch is intentional.
model.load_state_dict(torch.load(args.model), strict=False)
model.eval()
# encoding specific cache string: baked into the dataset-cache filename so
# caches generated with different encoder hyperparameters do not collide
encoding_specific = ''
if 'ssp' in args.spatial_encoding:
    encoding_specific = args.ssp_scaling
elif args.spatial_encoding == 'frozen-learned':
    encoding_specific = args.frozen_model
# NOTE(review): 'pc-gauss-softmax' is not among the --spatial-encoding
# choices above, so that half of the condition is unreachable here.
elif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':
    encoding_specific = args.pc_gauss_sigma
elif args.spatial_encoding == 'pc-dog':
    encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)
elif args.spatial_encoding == 'hex-trig':
    encoding_specific = args.hex_freq_coef
# Datasets whose path contains 'tf' get their own cache namespace.
if 'tf' in args.dataset:
    cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(
        args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, encoding_specific
    )
else:
    cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(
        args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, encoding_specific
    )
# if the file exists, load it from cache (unless --no-cache-load was given);
# otherwise build the loaders from the raw dataset, picking the variant that
# matches the dataset type and HD-cell configuration
if os.path.exists(cache_fname) and not args.no_cache_load:
    print("Generating Train and Test Loaders from Cache")
    trainloader, testloader = load_from_cache(cache_fname, batch_size=batch_size, n_samples=n_samples)
else:
    print("Generating Train and Test Loaders")
    if 'tf' in args.dataset:
        # tfrecord dataset only supports using the sin and cos of angular velocity
        assert args.sin_cos_ang == 1
        trainloader, testloader = tf_train_test_loaders(
            data,
            n_train_samples=n_samples,
            n_test_samples=n_samples,
            rollout_length=rollout_length,
            batch_size=batch_size,
            encoding=args.spatial_encoding,
            encoding_func=encoding_func,
            encoding_dim=args.dim,
            train_split=args.train_split,
            hd_dim=args.n_hd_cells,
            hd_encoding_func=hd_encoding_func,
            sin_cos_ang=args.sin_cos_ang,
        )
    else:
        if args.n_hd_cells > 0:
            trainloader, testloader = angular_train_test_loaders(
                data,
                n_train_samples=n_samples,
                n_test_samples=n_samples,
                rollout_length=rollout_length,
                batch_size=batch_size,
                encoding=args.spatial_encoding,
                encoding_func=encoding_func,
                encoding_dim=args.dim,
                train_split=args.train_split,
                hd_dim=args.n_hd_cells,
                hd_encoding_func=hd_encoding_func,
                sin_cos_ang=args.sin_cos_ang,
            )
        else:
            trainloader, testloader = train_test_loaders(
                data,
                n_train_samples=n_samples,
                n_test_samples=n_samples,
                rollout_length=rollout_length,
                batch_size=batch_size,
                encoding=args.spatial_encoding,
                encoding_func=encoding_func,
                encoding_dim=args.dim,
                train_split=args.train_split,
            )
    # Optionally persist the freshly generated tensors for faster reruns.
    if args.allow_cache:
        if not os.path.exists('dataset_cache'):
            os.makedirs('dataset_cache')
        np.savez(
            cache_fname,
            train_velocity_inputs=trainloader.dataset.velocity_inputs,
            train_ssp_inputs=trainloader.dataset.ssp_inputs,
            train_ssp_outputs=trainloader.dataset.ssp_outputs,
            test_velocity_inputs=testloader.dataset.velocity_inputs,
            test_ssp_inputs=testloader.dataset.ssp_inputs,
            test_ssp_outputs=testloader.dataset.ssp_outputs,
        )
print("Train and Test Loaders Generation Complete")
# Mask parameters for GridScorer: ten (start, end) radius pairs with a fixed
# inner value of 0.2 and outer values from 0.4 to 1.0 -- presumably annulus
# masks over the rate maps (see grid_scoring.scores).
starts = [0.2] * 10
ends = np.linspace(0.4, 1.0, num=10)
# NOTE(review): zip() is a single-use iterator; assumes GridScorer
# materializes it internally -- confirm before reusing masks_parameters.
masks_parameters = zip(starts, ends.tolist())
latest_epoch_scorer = scores.GridScorer(
    nbins=args.n_image_bins,
    coords_range=((0, 2.2), (0, 2.2)), # data_reader.get_coord_range(),
    mask_parameters=masks_parameters,
)
# Output plot filenames: LSTM vs dense activations, decoded-prediction vs
# ground-truth positions.
fname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix, args.n_samples)
fname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix, args.n_samples)
fname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix, args.n_samples)
fname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix, args.n_samples)
# Run and gather activations
print("Testing")
with torch.no_grad():
    # Everything is in one batch, so this loop will only happen once
    # (the loop variables are read after the loop below).
    for i, data in enumerate(testloader):
        velocity_inputs, ssp_inputs, ssp_outputs = data
        ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(velocity_inputs, ssp_inputs)
        # Flattened (rollout_step x batch) buffers for decoded positions and
        # hidden activations.
        predictions = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))
        coords = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))
        lstm_activations = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], model.lstm_hidden_size))
        dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], model.linear_hidden_size))
        assert rollout_length == ssp_pred.shape[0]
        # # For each neuron, contains the average activity at each spatial bin
        # # Computing for both ground truth and predicted location
        # rate_maps_pred = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))
        # rate_maps_truth = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))
        print("Computing predicted locations and true locations")
        # Using all data, one chunk at a time
        for ri in range(rollout_length):
            # trim out head direction info if that was included by only looking up to args.encoding_dim
            # computing 'predicted' coordinates, where the agent thinks it is
            pred = ssp_pred.detach().numpy()[ri, :, :args.dim]
            # pred = pred / pred.sum(axis=1)[:, np.newaxis]
            predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(
                pred,
                heatmap_vectors, xs, ys
            )
            # computing 'ground truth' coordinates, where the agent should be
            coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]
            # coord = coord / coord.sum(axis=1)[:, np.newaxis]
            coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(
                coord,
                heatmap_vectors, xs, ys
            )
            # reshaping activations and converting to numpy array
            lstm_activations[ri*ssp_pred.shape[1]:(ri+1)*ssp_pred.shape[1], :] = lstm_outputs.detach().numpy()[ri, :, :]
            dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = dense_outputs.detach().numpy()[ri, :, :]
    # predictions = predictions / args.ssp_scaling
    # coords = coords / args.ssp_scaling
    # Sanity print: decoded coordinates should stay within the arena limits.
    print(np.max(predictions))
    print(np.min(predictions))
# Grid scores for the four (activations x position-source) combinations:
# LSTM activations binned by predicted positions.
grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred, grid_scores_90_separation_pred = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=predictions, #res['pos_xy'],
    activations=lstm_activations, #res['bottleneck'],
    directory='output_grid_scores', #FLAGS.saver_results_directory,
    filename=fname_lstm_pred,
)
# LSTM activations binned by ground-truth positions.
grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=coords, #res['pos_xy'],
    activations=lstm_activations, #res['bottleneck'],
    directory='output_grid_scores', #FLAGS.saver_results_directory,
    filename=fname_lstm_truth,
)
# Dense-layer activations binned by predicted positions.
grid_scores_60_dense_pred, grid_scores_90_dense_pred, grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=predictions, #res['pos_xy'],
    activations=dense_activations, #res['bottleneck'],
    directory='output_grid_scores', #FLAGS.saver_results_directory,
    filename=fname_dense_pred,
)
# Dense-layer activations binned by ground-truth positions.
grid_scores_60_dense_truth, grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth, grid_scores_90_separation_dense_truth = utils.get_scores_and_plot(
    scorer=latest_epoch_scorer,
    data_abs_xy=coords, #res['pos_xy'],
    activations=dense_activations, #res['bottleneck'],
    directory='output_grid_scores', #FLAGS.saver_results_directory,
    filename=fname_dense_truth,
)
print(grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth)
# Saving to make grid score values easy to compare for different variations
fname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix, args.n_samples)
np.savez(
    fname,
    grid_scores_60_pred=grid_scores_60_pred,
    grid_scores_90_pred=grid_scores_90_pred,
    grid_scores_60_separation_pred=grid_scores_60_separation_pred,
    grid_scores_90_separation_pred=grid_scores_90_separation_pred,
    grid_scores_60_truth=grid_scores_60_truth,
    grid_scores_90_truth=grid_scores_90_truth,
    grid_scores_60_separation_truth=grid_scores_60_separation_truth,
    grid_scores_90_separation_truth=grid_scores_90_separation_truth,
    grid_scores_60_dense_pred=grid_scores_60_dense_pred,
    grid_scores_90_dense_pred=grid_scores_90_dense_pred,
    grid_scores_60_separation_dense_pred=grid_scores_60_separation_dense_pred,
    grid_scores_90_separation_dense_pred=grid_scores_90_separation_dense_pred,
    grid_scores_60_dense_truth=grid_scores_60_dense_truth,
    grid_scores_90_dense_truth=grid_scores_90_dense_truth,
    grid_scores_60_separation_dense_truth=grid_scores_60_separation_dense_truth,
    grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth,
)
|
flexible
|
{
"blob_id": "f4bc5663ab2b2a6dbb41a2fc3d7ca67100b455a4",
"index": 838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif display is None or 'localhost' in display:\n matplotlib.use('agg')\n<mask token>\nparser.add_argument('--n-samples', type=int, default=5000)\nparser.add_argument('--use-localization', action='store_true')\nparser.add_argument('--dataset', type=str, default='')\nparser.add_argument('--model', type=str, default='')\nparser.add_argument('--fname-prefix', type=str, default='sac')\nparser.add_argument('--spatial-encoding', type=str, default='ssp', choices=\n ['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',\n 'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',\n 'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',\n 'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',\n 'random-proj', 'legendre', 'learned', 'learned-normalized',\n 'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',\n 'tile-coding'])\nparser.add_argument('--frozen-model', type=str, default='', help=\n 'model to use frozen encoding weights from')\nparser.add_argument('--pc-gauss-sigma', type=float, default=0.25)\nparser.add_argument('--pc-diff-sigma', type=float, default=0.5)\nparser.add_argument('--hex-freq-coef', type=float, default=2.5, help=\n 'constant to scale frequencies by')\nparser.add_argument('--n-tiles', type=int, default=8, help=\n 'number of layers for tile coding')\nparser.add_argument('--n-bins', type=int, default=8, help=\n 'number of bins for tile coding')\nparser.add_argument('--ssp-scaling', type=float, default=1.0)\nparser.add_argument('--grid-ssp-min', type=float, default=0.25, help=\n 'minimum plane wave scale')\nparser.add_argument('--grid-ssp-max', type=float, default=2.0, help=\n 'maximum plane wave scale')\nparser.add_argument('--phi', type=float, default=0.5, help=\n 'phi as a fraction of pi for orth-proj-ssp')\nparser.add_argument('--n-proj', type=int, default=3, help=\n 'projection dimension for sub toroids')\nparser.add_argument('--scale-ratio', type=float, default=0, help=\n 
'ratio between sub toroid scales')\nparser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,\n 2, 3], help=\n 'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'\n )\nparser.add_argument('--seed', type=int, default=13)\nparser.add_argument('--dropout-p', type=float, default=0.5)\nparser.add_argument('--dim', type=int, default=512)\nparser.add_argument('--train-split', type=float, default=0.8, help=\n 'Training fraction of the train/test split')\nparser.add_argument('--allow-cache', action='store_true', help=\n 'once the dataset has been generated, it will be saved to a file to be loaded faster'\n )\nparser.add_argument('--trajectory-length', type=int, default=100)\nparser.add_argument('--minibatch-size', type=int, default=10)\nparser.add_argument('--n-image-bins', type=int, default=20)\nparser.add_argument('--n-hd-cells', type=int, default=0, help=\n 'If non-zero, use linear and angular velocity as well as HD cell output')\nparser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],\n help=\n 'Use the sin and cos of the angular velocity if angular velocities are used'\n )\nparser.add_argument('--use-lmu', action='store_true')\nparser.add_argument('--lmu-order', type=int, default=6)\nparser.add_argument('--no-cache-load', action='store_true', help=\n 'do not load from cache')\n<mask token>\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n<mask token>\nprint('Generating Heatmap Vectors')\nfor i, x in enumerate(xs):\n for j, y in enumerate(ys):\n heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)\n heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])\nprint('Heatmap Vector Generation Complete')\n<mask token>\nif args.n_hd_cells > 0:\n hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=\n 0.25, use_softmax=False, rng=np.random.RandomState(args.seed))\n if args.sin_cos_ang:\n input_size = 3\n else:\n input_size = 2\n model = 
SSPPathIntegrationModel(input_size=input_size, unroll_length=\n rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.\n dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)\nelse:\n hd_encoding_func = None\n model = SSPPathIntegrationModel(input_size=2, unroll_length=\n rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.\n use_lmu, order=args.lmu_order)\nmodel.load_state_dict(torch.load(args.model), strict=False)\nmodel.eval()\n<mask token>\nif 'ssp' in args.spatial_encoding:\n encoding_specific = args.ssp_scaling\nelif args.spatial_encoding == 'frozen-learned':\n encoding_specific = args.frozen_model\nelif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':\n encoding_specific = args.pc_gauss_sigma\nelif args.spatial_encoding == 'pc-dog':\n encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)\nelif args.spatial_encoding == 'hex-trig':\n encoding_specific = args.hex_freq_coef\nif 'tf' in args.dataset:\n cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nelse:\n cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nif os.path.exists(cache_fname) and not args.no_cache_load:\n print('Generating Train and Test Loaders from Cache')\n trainloader, testloader = load_from_cache(cache_fname, batch_size=\n batch_size, n_samples=n_samples)\nelse:\n print('Generating Train and Test Loaders')\n if 'tf' in args.dataset:\n assert args.sin_cos_ang == 1\n trainloader, testloader = tf_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, 
hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n elif args.n_hd_cells > 0:\n trainloader, testloader = angular_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n else:\n trainloader, testloader = train_test_loaders(data, n_train_samples=\n n_samples, n_test_samples=n_samples, rollout_length=\n rollout_length, batch_size=batch_size, encoding=args.\n spatial_encoding, encoding_func=encoding_func, encoding_dim=\n args.dim, train_split=args.train_split)\n if args.allow_cache:\n if not os.path.exists('dataset_cache'):\n os.makedirs('dataset_cache')\n np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.\n velocity_inputs, train_ssp_inputs=trainloader.dataset.\n ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,\n test_velocity_inputs=testloader.dataset.velocity_inputs,\n test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs\n =testloader.dataset.ssp_outputs)\nprint('Train and Test Loaders Generation Complete')\n<mask token>\nprint('Testing')\nwith torch.no_grad():\n for i, data in enumerate(testloader):\n velocity_inputs, ssp_inputs, ssp_outputs = data\n ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(\n velocity_inputs, ssp_inputs)\n predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.lstm_hidden_size))\n dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.linear_hidden_size))\n assert rollout_length == ssp_pred.shape[0]\n print('Computing predicted locations and true locations')\n for ri in range(rollout_length):\n pred = 
ssp_pred.detach().numpy()[ri, :, :args.dim]\n predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)\n coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]\n coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)\n lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = lstm_outputs.detach().numpy()[ri, :, :]\n dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[\n 1], :] = dense_outputs.detach().numpy()[ri, :, :]\nprint(np.max(predictions))\nprint(np.min(predictions))\n<mask token>\nprint(grid_scores_60_truth, grid_scores_90_truth,\n grid_scores_60_separation_truth, grid_scores_90_separation_truth)\n<mask token>\nnp.savez(fname, grid_scores_60_pred=grid_scores_60_pred,\n grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred\n =grid_scores_60_separation_pred, grid_scores_90_separation_pred=\n grid_scores_90_separation_pred, grid_scores_60_truth=\n grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,\n grid_scores_60_separation_truth=grid_scores_60_separation_truth,\n grid_scores_90_separation_truth=grid_scores_90_separation_truth,\n grid_scores_60_dense_pred=grid_scores_60_dense_pred,\n grid_scores_90_dense_pred=grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred=\n grid_scores_60_separation_dense_pred,\n grid_scores_90_separation_dense_pred=\n grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=\n grid_scores_60_dense_truth, grid_scores_90_dense_truth=\n grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=\n grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth\n )\n",
"step-3": "<mask token>\ndisplay = os.environ.get('DISPLAY')\nif display is None or 'localhost' in display:\n matplotlib.use('agg')\n<mask token>\nparser = argparse.ArgumentParser(\n 'Compute grid scores for a path integration model')\nparser.add_argument('--n-samples', type=int, default=5000)\nparser.add_argument('--use-localization', action='store_true')\nparser.add_argument('--dataset', type=str, default='')\nparser.add_argument('--model', type=str, default='')\nparser.add_argument('--fname-prefix', type=str, default='sac')\nparser.add_argument('--spatial-encoding', type=str, default='ssp', choices=\n ['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',\n 'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',\n 'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',\n 'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',\n 'random-proj', 'legendre', 'learned', 'learned-normalized',\n 'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',\n 'tile-coding'])\nparser.add_argument('--frozen-model', type=str, default='', help=\n 'model to use frozen encoding weights from')\nparser.add_argument('--pc-gauss-sigma', type=float, default=0.25)\nparser.add_argument('--pc-diff-sigma', type=float, default=0.5)\nparser.add_argument('--hex-freq-coef', type=float, default=2.5, help=\n 'constant to scale frequencies by')\nparser.add_argument('--n-tiles', type=int, default=8, help=\n 'number of layers for tile coding')\nparser.add_argument('--n-bins', type=int, default=8, help=\n 'number of bins for tile coding')\nparser.add_argument('--ssp-scaling', type=float, default=1.0)\nparser.add_argument('--grid-ssp-min', type=float, default=0.25, help=\n 'minimum plane wave scale')\nparser.add_argument('--grid-ssp-max', type=float, default=2.0, help=\n 'maximum plane wave scale')\nparser.add_argument('--phi', type=float, default=0.5, help=\n 'phi as a fraction of pi for orth-proj-ssp')\nparser.add_argument('--n-proj', type=int, 
default=3, help=\n 'projection dimension for sub toroids')\nparser.add_argument('--scale-ratio', type=float, default=0, help=\n 'ratio between sub toroid scales')\nparser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,\n 2, 3], help=\n 'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 3: hex grid'\n )\nparser.add_argument('--seed', type=int, default=13)\nparser.add_argument('--dropout-p', type=float, default=0.5)\nparser.add_argument('--dim', type=int, default=512)\nparser.add_argument('--train-split', type=float, default=0.8, help=\n 'Training fraction of the train/test split')\nparser.add_argument('--allow-cache', action='store_true', help=\n 'once the dataset has been generated, it will be saved to a file to be loaded faster'\n )\nparser.add_argument('--trajectory-length', type=int, default=100)\nparser.add_argument('--minibatch-size', type=int, default=10)\nparser.add_argument('--n-image-bins', type=int, default=20)\nparser.add_argument('--n-hd-cells', type=int, default=0, help=\n 'If non-zero, use linear and angular velocity as well as HD cell output')\nparser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],\n help=\n 'Use the sin and cos of the angular velocity if angular velocities are used'\n )\nparser.add_argument('--use-lmu', action='store_true')\nparser.add_argument('--lmu-order', type=int, default=6)\nparser.add_argument('--no-cache-load', action='store_true', help=\n 'do not load from cache')\nargs = parser.parse_args()\nssp_scaling = args.ssp_scaling\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\ndata = np.load(args.dataset)\nlimit_low = 0\nlimit_high = 2.2\nres = 128\nencoding_func, dim = get_encoding_function(args, limit_low=limit_low,\n limit_high=limit_high)\nxs = np.linspace(limit_low, limit_high, res)\nys = np.linspace(limit_low, limit_high, res)\nheatmap_vectors = np.zeros((len(xs), len(ys), dim))\nprint('Generating Heatmap Vectors')\nfor i, x in enumerate(xs):\n for j, y 
in enumerate(ys):\n heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)\n heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])\nprint('Heatmap Vector Generation Complete')\nn_samples = args.n_samples\nrollout_length = args.trajectory_length\nbatch_size = args.minibatch_size\nif args.n_hd_cells > 0:\n hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=\n 0.25, use_softmax=False, rng=np.random.RandomState(args.seed))\n if args.sin_cos_ang:\n input_size = 3\n else:\n input_size = 2\n model = SSPPathIntegrationModel(input_size=input_size, unroll_length=\n rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.\n dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)\nelse:\n hd_encoding_func = None\n model = SSPPathIntegrationModel(input_size=2, unroll_length=\n rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.\n use_lmu, order=args.lmu_order)\nmodel.load_state_dict(torch.load(args.model), strict=False)\nmodel.eval()\nencoding_specific = ''\nif 'ssp' in args.spatial_encoding:\n encoding_specific = args.ssp_scaling\nelif args.spatial_encoding == 'frozen-learned':\n encoding_specific = args.frozen_model\nelif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':\n encoding_specific = args.pc_gauss_sigma\nelif args.spatial_encoding == 'pc-dog':\n encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)\nelif args.spatial_encoding == 'hex-trig':\n encoding_specific = args.hex_freq_coef\nif 'tf' in args.dataset:\n cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nelse:\n cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nif os.path.exists(cache_fname) and not args.no_cache_load:\n print('Generating Train and Test Loaders from Cache')\n trainloader, 
testloader = load_from_cache(cache_fname, batch_size=\n batch_size, n_samples=n_samples)\nelse:\n print('Generating Train and Test Loaders')\n if 'tf' in args.dataset:\n assert args.sin_cos_ang == 1\n trainloader, testloader = tf_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n elif args.n_hd_cells > 0:\n trainloader, testloader = angular_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n else:\n trainloader, testloader = train_test_loaders(data, n_train_samples=\n n_samples, n_test_samples=n_samples, rollout_length=\n rollout_length, batch_size=batch_size, encoding=args.\n spatial_encoding, encoding_func=encoding_func, encoding_dim=\n args.dim, train_split=args.train_split)\n if args.allow_cache:\n if not os.path.exists('dataset_cache'):\n os.makedirs('dataset_cache')\n np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.\n velocity_inputs, train_ssp_inputs=trainloader.dataset.\n ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,\n test_velocity_inputs=testloader.dataset.velocity_inputs,\n test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs\n =testloader.dataset.ssp_outputs)\nprint('Train and Test Loaders Generation Complete')\nstarts = [0.2] * 10\nends = np.linspace(0.4, 1.0, num=10)\nmasks_parameters = zip(starts, ends.tolist())\nlatest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,\n coords_range=((0, 2.2), (0, 2.2)), 
mask_parameters=masks_parameters)\nfname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,\n args.n_samples)\nfname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,\n args.n_samples)\nfname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,\n args.n_samples)\nfname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,\n args.n_samples)\nprint('Testing')\nwith torch.no_grad():\n for i, data in enumerate(testloader):\n velocity_inputs, ssp_inputs, ssp_outputs = data\n ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(\n velocity_inputs, ssp_inputs)\n predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.lstm_hidden_size))\n dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.linear_hidden_size))\n assert rollout_length == ssp_pred.shape[0]\n print('Computing predicted locations and true locations')\n for ri in range(rollout_length):\n pred = ssp_pred.detach().numpy()[ri, :, :args.dim]\n predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)\n coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]\n coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)\n lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = lstm_outputs.detach().numpy()[ri, :, :]\n dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[\n 1], :] = dense_outputs.detach().numpy()[ri, :, :]\nprint(np.max(predictions))\nprint(np.min(predictions))\n(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,\n grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=\n latest_epoch_scorer, data_abs_xy=predictions, activations=\n lstm_activations, 
directory='output_grid_scores', filename=fname_lstm_pred)\n )\n(grid_scores_60_truth, grid_scores_90_truth,\n grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils\n .get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,\n activations=lstm_activations, directory='output_grid_scores', filename=\n fname_lstm_truth))\n(grid_scores_60_dense_pred, grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred\n ) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=\n predictions, activations=dense_activations, directory=\n 'output_grid_scores', filename=fname_dense_pred))\n(grid_scores_60_dense_truth, grid_scores_90_dense_truth,\n grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(\n scorer=latest_epoch_scorer, data_abs_xy=coords, activations=\n dense_activations, directory='output_grid_scores', filename=\n fname_dense_truth))\nprint(grid_scores_60_truth, grid_scores_90_truth,\n grid_scores_60_separation_truth, grid_scores_90_separation_truth)\nfname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,\n args.n_samples)\nnp.savez(fname, grid_scores_60_pred=grid_scores_60_pred,\n grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred\n =grid_scores_60_separation_pred, grid_scores_90_separation_pred=\n grid_scores_90_separation_pred, grid_scores_60_truth=\n grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,\n grid_scores_60_separation_truth=grid_scores_60_separation_truth,\n grid_scores_90_separation_truth=grid_scores_90_separation_truth,\n grid_scores_60_dense_pred=grid_scores_60_dense_pred,\n grid_scores_90_dense_pred=grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred=\n grid_scores_60_separation_dense_pred,\n grid_scores_90_separation_dense_pred=\n grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=\n grid_scores_60_dense_truth, 
grid_scores_90_dense_truth=\n grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=\n grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth\n )\n",
"step-4": "import matplotlib\nimport os\ndisplay = os.environ.get('DISPLAY')\nif display is None or 'localhost' in display:\n matplotlib.use('agg')\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache\nfrom models import SSPPathIntegrationModel\nfrom datetime import datetime\nfrom tensorboardX import SummaryWriter\nimport json\nfrom spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v\nfrom spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v\nimport matplotlib.pyplot as plt\nfrom path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, hd_gauss_encoding_func, hex_trig_encoding_func\nfrom ssp_navigation.utils.encodings import get_encoding_function\nimport grid_scoring.scores as scores\nimport grid_scoring.utils as utils\nfrom path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func\nparser = argparse.ArgumentParser(\n 'Compute grid scores for a path integration model')\nparser.add_argument('--n-samples', type=int, default=5000)\nparser.add_argument('--use-localization', action='store_true')\nparser.add_argument('--dataset', type=str, default='')\nparser.add_argument('--model', type=str, default='')\nparser.add_argument('--fname-prefix', type=str, default='sac')\nparser.add_argument('--spatial-encoding', type=str, default='ssp', choices=\n ['ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp',\n 'orth-proj-ssp', 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp',\n 'sub-toroid-ssp', 'var-sub-toroid-ssp', 'random', '2d', '2d-normalized',\n 'one-hot', 'hex-trig', 'trig', 'random-trig', 'random-rotated-trig',\n 'random-proj', 'legendre', 'learned', 'learned-normalized',\n 'frozen-learned', 'frozen-learned-normalized', 'pc-gauss', 'pc-dog',\n 
'tile-coding'])\nparser.add_argument('--frozen-model', type=str, default='', help=\n 'model to use frozen encoding weights from')\nparser.add_argument('--pc-gauss-sigma', type=float, default=0.25)\nparser.add_argument('--pc-diff-sigma', type=float, default=0.5)\nparser.add_argument('--hex-freq-coef', type=float, default=2.5, help=\n 'constant to scale frequencies by')\nparser.add_argument('--n-tiles', type=int, default=8, help=\n 'number of layers for tile coding')\nparser.add_argument('--n-bins', type=int, default=8, help=\n 'number of bins for tile coding')\nparser.add_argument('--ssp-scaling', type=float, default=1.0)\nparser.add_argument('--grid-ssp-min', type=float, default=0.25, help=\n 'minimum plane wave scale')\nparser.add_argument('--grid-ssp-max', type=float, default=2.0, help=\n 'maximum plane wave scale')\nparser.add_argument('--phi', type=float, default=0.5, help=\n 'phi as a fraction of pi for orth-proj-ssp')\nparser.add_argument('--n-proj', type=int, default=3, help=\n 'projection dimension for sub toroids')\nparser.add_argument('--scale-ratio', type=float, default=0, help=\n 'ratio between sub toroid scales')\nparser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1,\n 2, 3], help=\n 'pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 
3: hex grid'\n )\nparser.add_argument('--seed', type=int, default=13)\nparser.add_argument('--dropout-p', type=float, default=0.5)\nparser.add_argument('--dim', type=int, default=512)\nparser.add_argument('--train-split', type=float, default=0.8, help=\n 'Training fraction of the train/test split')\nparser.add_argument('--allow-cache', action='store_true', help=\n 'once the dataset has been generated, it will be saved to a file to be loaded faster'\n )\nparser.add_argument('--trajectory-length', type=int, default=100)\nparser.add_argument('--minibatch-size', type=int, default=10)\nparser.add_argument('--n-image-bins', type=int, default=20)\nparser.add_argument('--n-hd-cells', type=int, default=0, help=\n 'If non-zero, use linear and angular velocity as well as HD cell output')\nparser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],\n help=\n 'Use the sin and cos of the angular velocity if angular velocities are used'\n )\nparser.add_argument('--use-lmu', action='store_true')\nparser.add_argument('--lmu-order', type=int, default=6)\nparser.add_argument('--no-cache-load', action='store_true', help=\n 'do not load from cache')\nargs = parser.parse_args()\nssp_scaling = args.ssp_scaling\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\ndata = np.load(args.dataset)\nlimit_low = 0\nlimit_high = 2.2\nres = 128\nencoding_func, dim = get_encoding_function(args, limit_low=limit_low,\n limit_high=limit_high)\nxs = np.linspace(limit_low, limit_high, res)\nys = np.linspace(limit_low, limit_high, res)\nheatmap_vectors = np.zeros((len(xs), len(ys), dim))\nprint('Generating Heatmap Vectors')\nfor i, x in enumerate(xs):\n for j, y in enumerate(ys):\n heatmap_vectors[i, j, :] = encoding_func(x=x, y=y)\n heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])\nprint('Heatmap Vector Generation Complete')\nn_samples = args.n_samples\nrollout_length = args.trajectory_length\nbatch_size = args.minibatch_size\nif args.n_hd_cells > 0:\n 
hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=\n 0.25, use_softmax=False, rng=np.random.RandomState(args.seed))\n if args.sin_cos_ang:\n input_size = 3\n else:\n input_size = 2\n model = SSPPathIntegrationModel(input_size=input_size, unroll_length=\n rollout_length, sp_dim=dim + args.n_hd_cells, dropout_p=args.\n dropout_p, use_lmu=args.use_lmu, order=args.lmu_order)\nelse:\n hd_encoding_func = None\n model = SSPPathIntegrationModel(input_size=2, unroll_length=\n rollout_length, sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.\n use_lmu, order=args.lmu_order)\nmodel.load_state_dict(torch.load(args.model), strict=False)\nmodel.eval()\nencoding_specific = ''\nif 'ssp' in args.spatial_encoding:\n encoding_specific = args.ssp_scaling\nelif args.spatial_encoding == 'frozen-learned':\n encoding_specific = args.frozen_model\nelif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':\n encoding_specific = args.pc_gauss_sigma\nelif args.spatial_encoding == 'pc-dog':\n encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)\nelif args.spatial_encoding == 'hex-trig':\n encoding_specific = args.hex_freq_coef\nif 'tf' in args.dataset:\n cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nelse:\n cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(args.\n spatial_encoding, args.dim, args.seed, args.n_samples, args.\n n_hd_cells, encoding_specific)\nif os.path.exists(cache_fname) and not args.no_cache_load:\n print('Generating Train and Test Loaders from Cache')\n trainloader, testloader = load_from_cache(cache_fname, batch_size=\n batch_size, n_samples=n_samples)\nelse:\n print('Generating Train and Test Loaders')\n if 'tf' in args.dataset:\n assert args.sin_cos_ang == 1\n trainloader, testloader = tf_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n 
rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n elif args.n_hd_cells > 0:\n trainloader, testloader = angular_train_test_loaders(data,\n n_train_samples=n_samples, n_test_samples=n_samples,\n rollout_length=rollout_length, batch_size=batch_size, encoding=\n args.spatial_encoding, encoding_func=encoding_func,\n encoding_dim=args.dim, train_split=args.train_split, hd_dim=\n args.n_hd_cells, hd_encoding_func=hd_encoding_func, sin_cos_ang\n =args.sin_cos_ang)\n else:\n trainloader, testloader = train_test_loaders(data, n_train_samples=\n n_samples, n_test_samples=n_samples, rollout_length=\n rollout_length, batch_size=batch_size, encoding=args.\n spatial_encoding, encoding_func=encoding_func, encoding_dim=\n args.dim, train_split=args.train_split)\n if args.allow_cache:\n if not os.path.exists('dataset_cache'):\n os.makedirs('dataset_cache')\n np.savez(cache_fname, train_velocity_inputs=trainloader.dataset.\n velocity_inputs, train_ssp_inputs=trainloader.dataset.\n ssp_inputs, train_ssp_outputs=trainloader.dataset.ssp_outputs,\n test_velocity_inputs=testloader.dataset.velocity_inputs,\n test_ssp_inputs=testloader.dataset.ssp_inputs, test_ssp_outputs\n =testloader.dataset.ssp_outputs)\nprint('Train and Test Loaders Generation Complete')\nstarts = [0.2] * 10\nends = np.linspace(0.4, 1.0, num=10)\nmasks_parameters = zip(starts, ends.tolist())\nlatest_epoch_scorer = scores.GridScorer(nbins=args.n_image_bins,\n coords_range=((0, 2.2), (0, 2.2)), mask_parameters=masks_parameters)\nfname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix,\n args.n_samples)\nfname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix,\n args.n_samples)\nfname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix,\n 
args.n_samples)\nfname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix,\n args.n_samples)\nprint('Testing')\nwith torch.no_grad():\n for i, data in enumerate(testloader):\n velocity_inputs, ssp_inputs, ssp_outputs = data\n ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(\n velocity_inputs, ssp_inputs)\n predictions = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n coords = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], 2))\n lstm_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.lstm_hidden_size))\n dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1],\n model.linear_hidden_size))\n assert rollout_length == ssp_pred.shape[0]\n print('Computing predicted locations and true locations')\n for ri in range(rollout_length):\n pred = ssp_pred.detach().numpy()[ri, :, :args.dim]\n predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(pred, heatmap_vectors, xs, ys)\n coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]\n coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = ssp_to_loc_v(coord, heatmap_vectors, xs, ys)\n lstm_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :\n ] = lstm_outputs.detach().numpy()[ri, :, :]\n dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[\n 1], :] = dense_outputs.detach().numpy()[ri, :, :]\nprint(np.max(predictions))\nprint(np.min(predictions))\n(grid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred,\n grid_scores_90_separation_pred) = (utils.get_scores_and_plot(scorer=\n latest_epoch_scorer, data_abs_xy=predictions, activations=\n lstm_activations, directory='output_grid_scores', filename=fname_lstm_pred)\n )\n(grid_scores_60_truth, grid_scores_90_truth,\n grid_scores_60_separation_truth, grid_scores_90_separation_truth) = (utils\n .get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=coords,\n activations=lstm_activations, 
directory='output_grid_scores', filename=\n fname_lstm_truth))\n(grid_scores_60_dense_pred, grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred\n ) = (utils.get_scores_and_plot(scorer=latest_epoch_scorer, data_abs_xy=\n predictions, activations=dense_activations, directory=\n 'output_grid_scores', filename=fname_dense_pred))\n(grid_scores_60_dense_truth, grid_scores_90_dense_truth,\n grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth) = (utils.get_scores_and_plot(\n scorer=latest_epoch_scorer, data_abs_xy=coords, activations=\n dense_activations, directory='output_grid_scores', filename=\n fname_dense_truth))\nprint(grid_scores_60_truth, grid_scores_90_truth,\n grid_scores_60_separation_truth, grid_scores_90_separation_truth)\nfname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix,\n args.n_samples)\nnp.savez(fname, grid_scores_60_pred=grid_scores_60_pred,\n grid_scores_90_pred=grid_scores_90_pred, grid_scores_60_separation_pred\n =grid_scores_60_separation_pred, grid_scores_90_separation_pred=\n grid_scores_90_separation_pred, grid_scores_60_truth=\n grid_scores_60_truth, grid_scores_90_truth=grid_scores_90_truth,\n grid_scores_60_separation_truth=grid_scores_60_separation_truth,\n grid_scores_90_separation_truth=grid_scores_90_separation_truth,\n grid_scores_60_dense_pred=grid_scores_60_dense_pred,\n grid_scores_90_dense_pred=grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred=\n grid_scores_60_separation_dense_pred,\n grid_scores_90_separation_dense_pred=\n grid_scores_90_separation_dense_pred, grid_scores_60_dense_truth=\n grid_scores_60_dense_truth, grid_scores_90_dense_truth=\n grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth=\n grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth\n )\n",
"step-5": "# Compute grid scores using the new dataset format\n\nimport matplotlib\nimport os\n# allow code to work on machines without a display or in a screen session\ndisplay = os.environ.get('DISPLAY')\nif display is None or 'localhost' in display:\n matplotlib.use('agg')\n\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom datasets import train_test_loaders, angular_train_test_loaders, tf_train_test_loaders, load_from_cache\nfrom models import SSPPathIntegrationModel\nfrom datetime import datetime\nfrom tensorboardX import SummaryWriter\nimport json\nfrom spatial_semantic_pointers.utils import get_heatmap_vectors, ssp_to_loc, ssp_to_loc_v\nfrom spatial_semantic_pointers.plots import plot_predictions, plot_predictions_v\nimport matplotlib.pyplot as plt\nfrom path_integration_utils import pc_to_loc_v, encoding_func_from_model, pc_gauss_encoding_func, ssp_encoding_func, \\\n hd_gauss_encoding_func, hex_trig_encoding_func\nfrom ssp_navigation.utils.encodings import get_encoding_function\n\nimport grid_scoring.scores as scores\nimport grid_scoring.utils as utils\n# from grid_scoring.run_network import run_and_gather_activations, run_and_gather_localization_activations\nfrom path_integration_utils import encoding_func_from_model, pc_gauss_encoding_func\n\n\nparser = argparse.ArgumentParser('Compute grid scores for a path integration model')\nparser.add_argument('--n-samples', type=int, default=5000)\nparser.add_argument('--use-localization', action='store_true')\n# TODO: use these parameters\nparser.add_argument('--dataset', type=str, default='')\nparser.add_argument('--model', type=str, default='')\nparser.add_argument('--fname-prefix', type=str, default='sac')\n\nparser.add_argument('--spatial-encoding', type=str, default='ssp',\n choices=[\n 'ssp', 'hex-ssp', 'periodic-hex-ssp', 'grid-ssp', 'ind-ssp', 'orth-proj-ssp',\n 'rec-ssp', 'rec-hex-ssp', 'rec-ind-ssp', 
'sub-toroid-ssp', 'var-sub-toroid-ssp',\n 'random', '2d', '2d-normalized', 'one-hot', 'hex-trig',\n 'trig', 'random-trig', 'random-rotated-trig', 'random-proj', 'legendre',\n 'learned', 'learned-normalized', 'frozen-learned', 'frozen-learned-normalized',\n 'pc-gauss', 'pc-dog', 'tile-coding'\n ])\n # choices=['ssp', '2d', 'frozen-learned', 'pc-gauss', 'pc-dog', 'pc-gauss-softmax', 'hex-trig', 'hex-trig-all-freq'])\nparser.add_argument('--frozen-model', type=str, default='', help='model to use frozen encoding weights from')\nparser.add_argument('--pc-gauss-sigma', type=float, default=0.25)\nparser.add_argument('--pc-diff-sigma', type=float, default=0.5)\nparser.add_argument('--hex-freq-coef', type=float, default=2.5, help='constant to scale frequencies by')\nparser.add_argument('--n-tiles', type=int, default=8, help='number of layers for tile coding')\nparser.add_argument('--n-bins', type=int, default=8, help='number of bins for tile coding')\nparser.add_argument('--ssp-scaling', type=float, default=1.0)\nparser.add_argument('--grid-ssp-min', type=float, default=0.25, help='minimum plane wave scale')\nparser.add_argument('--grid-ssp-max', type=float, default=2.0, help='maximum plane wave scale')\nparser.add_argument('--phi', type=float, default=0.5, help='phi as a fraction of pi for orth-proj-ssp')\nparser.add_argument('--n-proj', type=int, default=3, help='projection dimension for sub toroids')\nparser.add_argument('--scale-ratio', type=float, default=0, help='ratio between sub toroid scales')\nparser.add_argument('--hilbert-points', type=int, default=1, choices=[0, 1, 2, 3],\n help='pc centers. 0: random uniform. 1: hilbert curve. 2: evenly spaced grid. 
3: hex grid')\n\nparser.add_argument('--seed', type=int, default=13)\nparser.add_argument('--dropout-p', type=float, default=0.5)\nparser.add_argument('--dim', type=int, default=512)\nparser.add_argument('--train-split', type=float, default=0.8, help='Training fraction of the train/test split')\nparser.add_argument('--allow-cache', action='store_true',\n help='once the dataset has been generated, it will be saved to a file to be loaded faster')\n\nparser.add_argument('--trajectory-length', type=int, default=100)\nparser.add_argument('--minibatch-size', type=int, default=10)\n\nparser.add_argument('--n-image-bins', type=int, default=20)\n\nparser.add_argument('--n-hd-cells', type=int, default=0, help='If non-zero, use linear and angular velocity as well as HD cell output')\nparser.add_argument('--sin-cos-ang', type=int, default=1, choices=[0, 1],\n help='Use the sin and cos of the angular velocity if angular velocities are used')\nparser.add_argument('--use-lmu', action='store_true')\nparser.add_argument('--lmu-order', type=int, default=6)\n\nparser.add_argument('--no-cache-load', action='store_true', help='do not load from cache')\n\nargs = parser.parse_args()\n\nssp_scaling = args.ssp_scaling\n\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\ndata = np.load(args.dataset)\n\n# only used for frozen-learned and other custom encoding functions\n# encoding_func = None\n\nlimit_low = 0 #* args.ssp_scaling\nlimit_high = 2.2 #* args.ssp_scaling\nres = 128 #256\n\nencoding_func, dim = get_encoding_function(args, limit_low=limit_low, limit_high=limit_high)\n\nxs = np.linspace(limit_low, limit_high, res)\nys = np.linspace(limit_low, limit_high, res)\n\n# FIXME: inefficient but will work for now\nheatmap_vectors = np.zeros((len(xs), len(ys), dim))\n\nprint(\"Generating Heatmap Vectors\")\n\nfor i, x in enumerate(xs):\n for j, y in enumerate(ys):\n heatmap_vectors[i, j, :] = encoding_func(\n # batch dim\n # np.array(\n # [[x, y]]\n # )\n # no batch dim\n # 
np.array(\n # [x, y]\n # )\n # new signature\n x=x, y=y\n )\n\n heatmap_vectors[i, j, :] /= np.linalg.norm(heatmap_vectors[i, j, :])\n\nprint(\"Heatmap Vector Generation Complete\")\n\nn_samples = args.n_samples\nrollout_length = args.trajectory_length\nbatch_size = args.minibatch_size\n\n\nif args.n_hd_cells > 0:\n hd_encoding_func = hd_gauss_encoding_func(dim=args.n_hd_cells, sigma=0.25, use_softmax=False, rng=np.random.RandomState(args.seed))\n if args.sin_cos_ang:\n input_size = 3\n else:\n input_size = 2\n model = SSPPathIntegrationModel(\n input_size=input_size, unroll_length=rollout_length,\n sp_dim=dim + args.n_hd_cells, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order\n )\nelse:\n hd_encoding_func = None\n model = SSPPathIntegrationModel(\n input_size=2, unroll_length=rollout_length,\n sp_dim=dim, dropout_p=args.dropout_p, use_lmu=args.use_lmu, order=args.lmu_order\n )\n\n\n# model = SSPPathIntegrationModel(unroll_length=rollout_length, sp_dim=dim, dropout_p=args.dropout_p)\n\nmodel.load_state_dict(torch.load(args.model), strict=False)\n\nmodel.eval()\n\n# encoding specific cache string\nencoding_specific = ''\nif 'ssp' in args.spatial_encoding:\n encoding_specific = args.ssp_scaling\nelif args.spatial_encoding == 'frozen-learned':\n encoding_specific = args.frozen_model\nelif args.spatial_encoding == 'pc-gauss' or args.spatial_encoding == 'pc-gauss-softmax':\n encoding_specific = args.pc_gauss_sigma\nelif args.spatial_encoding == 'pc-dog':\n encoding_specific = '{}-{}'.format(args.pc_gauss_sigma, args.pc_diff_sigma)\nelif args.spatial_encoding == 'hex-trig':\n encoding_specific = args.hex_freq_coef\n\nif 'tf' in args.dataset:\n cache_fname = 'dataset_cache/tf_{}_{}_{}_{}_{}_{}.npz'.format(\n args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, encoding_specific\n )\nelse:\n cache_fname = 'dataset_cache/{}_{}_{}_{}_{}_{}.npz'.format(\n args.spatial_encoding, args.dim, args.seed, args.n_samples, args.n_hd_cells, 
encoding_specific\n )\n\n# if the file exists, load it from cache\nif os.path.exists(cache_fname) and not args.no_cache_load:\n print(\"Generating Train and Test Loaders from Cache\")\n trainloader, testloader = load_from_cache(cache_fname, batch_size=batch_size, n_samples=n_samples)\nelse:\n print(\"Generating Train and Test Loaders\")\n\n if 'tf' in args.dataset:\n # tfrecord dataset only supports using the sin and cos of angular velocity\n assert args.sin_cos_ang == 1\n\n trainloader, testloader = tf_train_test_loaders(\n data,\n n_train_samples=n_samples,\n n_test_samples=n_samples,\n rollout_length=rollout_length,\n batch_size=batch_size,\n encoding=args.spatial_encoding,\n encoding_func=encoding_func,\n encoding_dim=args.dim,\n train_split=args.train_split,\n hd_dim=args.n_hd_cells,\n hd_encoding_func=hd_encoding_func,\n sin_cos_ang=args.sin_cos_ang,\n )\n\n else:\n\n if args.n_hd_cells > 0:\n trainloader, testloader = angular_train_test_loaders(\n data,\n n_train_samples=n_samples,\n n_test_samples=n_samples,\n rollout_length=rollout_length,\n batch_size=batch_size,\n encoding=args.spatial_encoding,\n encoding_func=encoding_func,\n encoding_dim=args.dim,\n train_split=args.train_split,\n hd_dim=args.n_hd_cells,\n hd_encoding_func=hd_encoding_func,\n sin_cos_ang=args.sin_cos_ang,\n )\n else:\n trainloader, testloader = train_test_loaders(\n data,\n n_train_samples=n_samples,\n n_test_samples=n_samples,\n rollout_length=rollout_length,\n batch_size=batch_size,\n encoding=args.spatial_encoding,\n encoding_func=encoding_func,\n encoding_dim=args.dim,\n train_split=args.train_split,\n )\n\n if args.allow_cache:\n\n if not os.path.exists('dataset_cache'):\n os.makedirs('dataset_cache')\n\n np.savez(\n cache_fname,\n train_velocity_inputs=trainloader.dataset.velocity_inputs,\n train_ssp_inputs=trainloader.dataset.ssp_inputs,\n train_ssp_outputs=trainloader.dataset.ssp_outputs,\n test_velocity_inputs=testloader.dataset.velocity_inputs,\n 
test_ssp_inputs=testloader.dataset.ssp_inputs,\n test_ssp_outputs=testloader.dataset.ssp_outputs,\n )\n\nprint(\"Train and Test Loaders Generation Complete\")\n\nstarts = [0.2] * 10\nends = np.linspace(0.4, 1.0, num=10)\nmasks_parameters = zip(starts, ends.tolist())\nlatest_epoch_scorer = scores.GridScorer(\n nbins=args.n_image_bins,\n coords_range=((0, 2.2), (0, 2.2)), # data_reader.get_coord_range(),\n mask_parameters=masks_parameters,\n)\n\n\nfname_lstm_pred = '{}_{}samples_lstm_pred.pdf'.format(args.fname_prefix, args.n_samples)\nfname_lstm_truth = '{}_{}samples_lstm_truth.pdf'.format(args.fname_prefix, args.n_samples)\nfname_dense_pred = '{}_{}samples_dense_pred.pdf'.format(args.fname_prefix, args.n_samples)\nfname_dense_truth = '{}_{}samples_dense_truth.pdf'.format(args.fname_prefix, args.n_samples)\n\n# Run and gather activations\n\nprint(\"Testing\")\nwith torch.no_grad():\n # Everything is in one batch, so this loop will only happen once\n for i, data in enumerate(testloader):\n velocity_inputs, ssp_inputs, ssp_outputs = data\n\n ssp_pred, lstm_outputs, dense_outputs = model.forward_activations(velocity_inputs, ssp_inputs)\n\n predictions = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))\n coords = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], 2))\n lstm_activations = np.zeros((ssp_pred.shape[0]*ssp_pred.shape[1], model.lstm_hidden_size))\n dense_activations = np.zeros((ssp_pred.shape[0] * ssp_pred.shape[1], model.linear_hidden_size))\n\n assert rollout_length == ssp_pred.shape[0]\n\n # # For each neuron, contains the average activity at each spatial bin\n # # Computing for both ground truth and predicted location\n # rate_maps_pred = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))\n # rate_maps_truth = np.zeros((model.lstm_hidden_size, len(xs), len(ys)))\n\n print(\"Computing predicted locations and true locations\")\n # Using all data, one chunk at a time\n for ri in range(rollout_length):\n\n # trim out head direction info if that was included 
by only looking up to args.encoding_dim\n\n # computing 'predicted' coordinates, where the agent thinks it is\n pred = ssp_pred.detach().numpy()[ri, :, :args.dim]\n # pred = pred / pred.sum(axis=1)[:, np.newaxis]\n predictions[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(\n pred,\n heatmap_vectors, xs, ys\n )\n\n # computing 'ground truth' coordinates, where the agent should be\n coord = ssp_outputs.detach().numpy()[:, ri, :args.dim]\n # coord = coord / coord.sum(axis=1)[:, np.newaxis]\n coords[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = ssp_to_loc_v(\n coord,\n heatmap_vectors, xs, ys\n )\n\n # reshaping activations and converting to numpy array\n lstm_activations[ri*ssp_pred.shape[1]:(ri+1)*ssp_pred.shape[1], :] = lstm_outputs.detach().numpy()[ri, :, :]\n dense_activations[ri * ssp_pred.shape[1]:(ri + 1) * ssp_pred.shape[1], :] = dense_outputs.detach().numpy()[ri, :, :]\n\n# predictions = predictions / args.ssp_scaling\n# coords = coords / args.ssp_scaling\n\nprint(np.max(predictions))\nprint(np.min(predictions))\n\ngrid_scores_60_pred, grid_scores_90_pred, grid_scores_60_separation_pred, grid_scores_90_separation_pred = utils.get_scores_and_plot(\n scorer=latest_epoch_scorer,\n data_abs_xy=predictions, #res['pos_xy'],\n activations=lstm_activations, #res['bottleneck'],\n directory='output_grid_scores', #FLAGS.saver_results_directory,\n filename=fname_lstm_pred,\n)\n\ngrid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth = utils.get_scores_and_plot(\n scorer=latest_epoch_scorer,\n data_abs_xy=coords, #res['pos_xy'],\n activations=lstm_activations, #res['bottleneck'],\n directory='output_grid_scores', #FLAGS.saver_results_directory,\n filename=fname_lstm_truth,\n)\n\ngrid_scores_60_dense_pred, grid_scores_90_dense_pred, grid_scores_60_separation_dense_pred, grid_scores_90_separation_dense_pred = utils.get_scores_and_plot(\n scorer=latest_epoch_scorer,\n 
data_abs_xy=predictions, #res['pos_xy'],\n activations=dense_activations, #res['bottleneck'],\n directory='output_grid_scores', #FLAGS.saver_results_directory,\n filename=fname_dense_pred,\n)\n\ngrid_scores_60_dense_truth, grid_scores_90_dense_truth, grid_scores_60_separation_dense_truth, grid_scores_90_separation_dense_truth = utils.get_scores_and_plot(\n scorer=latest_epoch_scorer,\n data_abs_xy=coords, #res['pos_xy'],\n activations=dense_activations, #res['bottleneck'],\n directory='output_grid_scores', #FLAGS.saver_results_directory,\n filename=fname_dense_truth,\n)\n\n\nprint(grid_scores_60_truth, grid_scores_90_truth, grid_scores_60_separation_truth, grid_scores_90_separation_truth)\n\n# Saving to make grid score values easy to compare for different variations\nfname = 'output_grid_scores/{}_{}samples.npz'.format(args.fname_prefix, args.n_samples)\nnp.savez(\n fname,\n grid_scores_60_pred=grid_scores_60_pred,\n grid_scores_90_pred=grid_scores_90_pred,\n grid_scores_60_separation_pred=grid_scores_60_separation_pred,\n grid_scores_90_separation_pred=grid_scores_90_separation_pred,\n grid_scores_60_truth=grid_scores_60_truth,\n grid_scores_90_truth=grid_scores_90_truth,\n grid_scores_60_separation_truth=grid_scores_60_separation_truth,\n grid_scores_90_separation_truth=grid_scores_90_separation_truth,\n\n grid_scores_60_dense_pred=grid_scores_60_dense_pred,\n grid_scores_90_dense_pred=grid_scores_90_dense_pred,\n grid_scores_60_separation_dense_pred=grid_scores_60_separation_dense_pred,\n grid_scores_90_separation_dense_pred=grid_scores_90_separation_dense_pred,\n grid_scores_60_dense_truth=grid_scores_60_dense_truth,\n grid_scores_90_dense_truth=grid_scores_90_dense_truth,\n grid_scores_60_separation_dense_truth=grid_scores_60_separation_dense_truth,\n grid_scores_90_separation_dense_truth=grid_scores_90_separation_dense_truth,\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dock(pydantic.BaseModel):
fleet_name: str = ''
params: List[DockParameter] = []
class Config:
orm_mode = True
<|reserved_special_token_1|>
from typing import List
import pydantic
from ..rmf_fleet_msgs.DockParameter import DockParameter
class Dock(pydantic.BaseModel):
fleet_name: str = ''
params: List[DockParameter] = []
class Config:
orm_mode = True
<|reserved_special_token_1|>
# This is a generated file, do not edit
from typing import List
import pydantic
from ..rmf_fleet_msgs.DockParameter import DockParameter
class Dock(pydantic.BaseModel):
fleet_name: str = "" # string
params: List[DockParameter] = [] # rmf_fleet_msgs/DockParameter
class Config:
orm_mode = True
# string fleet_name
# DockParameter[] params
|
flexible
|
{
"blob_id": "62d0818395a6093ebf2c410aaadeb8a0250707ab",
"index": 3865,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = ''\n params: List[DockParameter] = []\n\n\n class Config:\n orm_mode = True\n",
"step-3": "from typing import List\nimport pydantic\nfrom ..rmf_fleet_msgs.DockParameter import DockParameter\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = ''\n params: List[DockParameter] = []\n\n\n class Config:\n orm_mode = True\n",
"step-4": "# This is a generated file, do not edit\n\nfrom typing import List\n\nimport pydantic\n\nfrom ..rmf_fleet_msgs.DockParameter import DockParameter\n\n\nclass Dock(pydantic.BaseModel):\n fleet_name: str = \"\" # string\n params: List[DockParameter] = [] # rmf_fleet_msgs/DockParameter\n\n class Config:\n orm_mode = True\n\n\n# string fleet_name\n# DockParameter[] params\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def S_q(theta, a0=1, b0=0.01):
w = theta[:, :-1]
s = tf.reshape(theta[:, -1], shape=[-1, 1])
y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))
y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])
dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)
dw_prior = -s ** 2 * w
dw = dw_data * N / mb_size_x + dw_prior
w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])
ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)
return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = tf.exp(-pdist / h ** 2 / 2.0)
sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2
dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4
return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = (c + pdist) ** beta
coeff = 2 * beta * (c + pdist) ** (beta - 1)
dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.
reduce_sum(coeff, axis=1), 1))
dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +
(-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
return kxy, dxkxy, dxykxy_tr
<|reserved_special_token_0|>
def ksd_emp(x, ap=1, dim=X_dim):
sq = S_q(x, ap)
kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
n = tf.cast(tf.shape(x)[0], tf.float64)
ksd = (tf.reduce_sum(t13) + t2) / n ** 2
return ksd
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample_z(m, n, sd=10.0):
return np.random.normal(0, sd, size=[m, n])
def S_q(theta, a0=1, b0=0.01):
w = theta[:, :-1]
s = tf.reshape(theta[:, -1], shape=[-1, 1])
y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))
y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])
dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)
dw_prior = -s ** 2 * w
dw = dw_data * N / mb_size_x + dw_prior
w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])
ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)
return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = tf.exp(-pdist / h ** 2 / 2.0)
sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2
dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4
return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = (c + pdist) ** beta
coeff = 2 * beta * (c + pdist) ** (beta - 1)
dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.
reduce_sum(coeff, axis=1), 1))
dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +
(-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
return kxy, dxkxy, dxykxy_tr
<|reserved_special_token_0|>
def ksd_emp(x, ap=1, dim=X_dim):
sq = S_q(x, ap)
kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
n = tf.cast(tf.shape(x)[0], tf.float64)
ksd = (tf.reduce_sum(t13) + t2) / n ** 2
return ksd
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample_z(m, n, sd=10.0):
return np.random.normal(0, sd, size=[m, n])
def S_q(theta, a0=1, b0=0.01):
w = theta[:, :-1]
s = tf.reshape(theta[:, -1], shape=[-1, 1])
y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))
y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])
dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)
dw_prior = -s ** 2 * w
dw = dw_data * N / mb_size_x + dw_prior
w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])
ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)
return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = tf.exp(-pdist / h ** 2 / 2.0)
sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2
dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4
return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = (c + pdist) ** beta
coeff = 2 * beta * (c + pdist) ** (beta - 1)
dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.
reduce_sum(coeff, axis=1), 1))
dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +
(-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
return kxy, dxkxy, dxykxy_tr
<|reserved_special_token_0|>
def ksd_emp(x, ap=1, dim=X_dim):
sq = S_q(x, ap)
kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
n = tf.cast(tf.shape(x)[0], tf.float64)
ksd = (tf.reduce_sum(t13) + t2) / n ** 2
return ksd
def generator(z):
G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
out = 10.0 * tf.matmul(G_h2, G_W3) + G_b3
return out
def evaluation(theta, X_t=X_test, y_t=y_test):
w = theta[:, :-1]
y = y_t.reshape([-1, 1])
coff = -np.matmul(y * X_t, w.T)
prob = np.mean(1.0 / (1 + np.exp(coff)), axis=1)
acc = np.mean(prob > 0.5)
llh = np.mean(np.log(prob))
return acc, llh
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tf.reset_default_graph()
<|reserved_special_token_0|>
def sample_z(m, n, sd=10.0):
return np.random.normal(0, sd, size=[m, n])
def S_q(theta, a0=1, b0=0.01):
w = theta[:, :-1]
s = tf.reshape(theta[:, -1], shape=[-1, 1])
y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))
y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])
dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)
dw_prior = -s ** 2 * w
dw = dw_data * N / mb_size_x + dw_prior
w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])
ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)
return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = tf.exp(-pdist / h ** 2 / 2.0)
sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2
dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4
return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x
)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)
kxy = (c + pdist) ** beta
coeff = 2 * beta * (c + pdist) ** (beta - 1)
dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.
reduce_sum(coeff, axis=1), 1))
dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +
(-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
return kxy, dxkxy, dxykxy_tr
<|reserved_special_token_0|>
def ksd_emp(x, ap=1, dim=X_dim):
sq = S_q(x, ap)
kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
n = tf.cast(tf.shape(x)[0], tf.float64)
ksd = (tf.reduce_sum(t13) + t2) / n ** 2
return ksd
def generator(z):
G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
out = 10.0 * tf.matmul(G_h2, G_W3) + G_b3
return out
def evaluation(theta, X_t=X_test, y_t=y_test):
w = theta[:, :-1]
y = y_t.reshape([-1, 1])
coff = -np.matmul(y * X_t, w.T)
prob = np.mean(1.0 / (1 + np.exp(coff)), axis=1)
acc = np.mean(prob > 0.5)
llh = np.mean(np.log(prob))
return acc, llh
<|reserved_special_token_0|>
sess.run(tf.global_variables_initializer())
<|reserved_special_token_0|>
for it in range(n_iter):
batch = [(i % N) for i in range(it * mb_size_x, (it + 1) * mb_size_x)]
X_b = X_train[batch, :]
y_b = y_train[batch]
_, loss_curr = sess.run([solver_KSD, ksd], feed_dict={Xs: X_b, ys: y_b,
z: sample_z(mb_size, z_dim)})
ksd_loss[it] = loss_curr
if it % iter_eval == 0:
post = sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim)})
post_eval = evaluation(post)
acc[it // iter_eval] = post_eval[0]
loglik[it // iter_eval] = post_eval[1]
plt.plot(ksd)
plt.axvline(np.argmin(ksd_loss), color='r')
plt.title('KSD loss (min={:.04f} at iter {})'.format(np.min(ksd_loss), np.
argmin(ksd_loss)))
plt.show()
plt.close()
plt.plot(np.arange(len(acc)) * iter_eval, acc)
plt.ylim(top=0.8)
plt.axhline(0.75, color='g')
plt.title('Accuracy (max={:0.4f} at iter {})'.format(np.max(acc), np.argmax
(acc) * iter_eval))
plt.show()
plt.close()
<|reserved_special_token_1|>
"""
SteinNS: BayesianLogisticRegression_KSD.py
Created on 10/9/18 6:25 PM
@author: Hanxi Sun
"""
import tensorflow as tf
import numpy as np
import scipy.io
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
########################################################################################################################
# Data
data = scipy.io.loadmat("data/covertype.mat")
X_input = data['covtype'][:, 1:]
y_input = data['covtype'][:, 0]
y_input[y_input == 2] = -1
N_all = X_input.shape[0]
X_input = np.hstack([X_input, np.ones([N_all, 1])])
d = X_input.shape[1]
X_dim = d + 1 # dimension of the target distribution
# split the data set into training and testing
X_train, X_test, y_train, y_test = train_test_split(X_input, y_input, test_size=0.2, random_state=21)
X_train_tf = tf.convert_to_tensor(X_train, dtype=tf.float64)
X_test_tf = tf.convert_to_tensor(X_test, dtype=tf.float64)
y_train_tf = tf.convert_to_tensor(y_train, dtype=tf.float64)
y_test_tf = tf.convert_to_tensor(y_test, dtype=tf.float64)
N = X_train.shape[0]
########################################################################################################################
# model parameters
lr = 4e-4 # learning rate
kernel = "rbf" # "rbf" or "imq" kernel
z_dim = 100
h_dim_g = 200
mb_size_x = 100 # date mini-batch size
mb_size = 100 # sample mini-batch size
n_iter = 200000
iter_eval = 1000
optimizer = tf.train.RMSPropOptimizer
########################################################################################################################
# network
tf.reset_default_graph()
initializer = tf.contrib.layers.xavier_initializer()
Xs = tf.placeholder(tf.float64, shape=[None, d])
ys = tf.placeholder(tf.float64, shape=[None])
z = tf.placeholder(tf.float64, shape=[None, z_dim])
G_W1 = tf.get_variable('g_w1', [z_dim, h_dim_g], dtype=tf.float64, initializer=initializer)
G_b1 = tf.get_variable('g_b1', [h_dim_g], dtype=tf.float64, initializer=initializer)
G_W2 = tf.get_variable('g_w2', [h_dim_g, h_dim_g], dtype=tf.float64, initializer=initializer)
G_b2 = tf.get_variable('g_b2', [h_dim_g], dtype=tf.float64, initializer=initializer)
G_W3 = tf.get_variable('g_w3', [h_dim_g, X_dim], dtype=tf.float64, initializer=initializer)
G_b3 = tf.get_variable('g_b3', [X_dim], dtype=tf.float64, initializer=initializer)
theta_G = [G_W1, G_b1, G_W2, G_b2, G_W3, G_b3]
########################################################################################################################
# functions & structures
def sample_z(m, n, sd=10.):
return np.random.normal(0, sd, size=[m, n])
def S_q(theta, a0=1, b0=0.01):
# Reference:
# https://github.com/DartML/Stein-Variational-Gradient-Descent/blob/master/python/bayesian_logistic_regression.py
w = theta[:, :-1] # (m, d)
s = tf.reshape(theta[:, -1], shape=[-1, 1]) # (m, 1); alpha = s**2
y_hat = 1. / (1. + tf.exp(- tf.matmul(Xs, tf.transpose(w)))) # (mx, m); shape(Xs) = (mx, d)
y = tf.reshape((ys + 1.) / 2., shape=[-1, 1]) # (mx, 1)
dw_data = tf.matmul(tf.transpose(y - y_hat), Xs) # (m, d)
dw_prior = - s**2 * w # (m, d)
dw = dw_data * N / mb_size_x + dw_prior # (m, d)
w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1]) # (m, 1); = wtw
ds = (2. * a0 - 2 + d) / s - tf.multiply(w2 + 2. * b0, s) # (m, 1)
return tf.concat([dw, ds], axis=1)
def rbf_kernel(x, dim=X_dim, h=1.):
# Reference 1: https://github.com/ChunyuanLI/SVGD/blob/master/demo_svgd.ipynb
# Reference 2: https://github.com/yc14600/svgd/blob/master/svgd.py
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY) # pairwise distance matrix
kxy = tf.exp(- pdist / h ** 2 / 2.0) # kernel matrix
sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)
dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / (h ** 2) # sum_y dk(x, y)/dx
dxykxy_tr = tf.multiply((dim * (h**2) - pdist), kxy) / (h**4) # tr( dk(x, y)/dxdy )
return kxy, dxkxy, dxykxy_tr
def imq_kernel(x, dim=X_dim, beta=-.5, c=1.):
XY = tf.matmul(x, tf.transpose(x))
X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])
X2 = tf.tile(X2_, [1, tf.shape(x)[0]])
pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY) # pairwise distance matrix
kxy = (c + pdist) ** beta
coeff = 2 * beta * ((c + pdist) ** (beta-1))
dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.reduce_sum(coeff, axis=1), 1))
dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2),
- 2 * dim * c * beta + (- 4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)
return kxy, dxkxy, dxykxy_tr
kernels = {"rbf": rbf_kernel,
"imq": imq_kernel}
Kernel = kernels[kernel]
def ksd_emp(x, ap=1, dim=X_dim):
sq = S_q(x, ap)
kxy, dxkxy, dxykxy_tr = Kernel(x, dim)
t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr
t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))
n = tf.cast(tf.shape(x)[0], tf.float64)
# ksd = (tf.reduce_sum(t13) - tf.trace(t13) + t2) / (n * (n-1))
ksd = (tf.reduce_sum(t13) + t2) / (n ** 2)
return ksd
def generator(z):
G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)
G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)
out = 10. * tf.matmul(G_h2, G_W3) + G_b3
return out
def evaluation(theta, X_t=X_test, y_t=y_test):
w = theta[:, :-1]
y = y_t.reshape([-1, 1])
coff = - np.matmul(y * X_t, w.T)
prob = np.mean(1. / (1 + np.exp(coff)), axis=1)
acc = np.mean(prob > .5)
llh = np.mean(np.log(prob))
return acc, llh
G_sample = generator(z)
ksd = ksd_emp(G_sample)
solver_KSD = optimizer(learning_rate=lr).minimize(ksd, var_list=theta_G)
#######################################################################################################################
sess = tf.Session()
sess.run(tf.global_variables_initializer())
ksd_loss = np.zeros(n_iter)
acc = np.zeros(1 + (n_iter // iter_eval))
loglik = np.zeros(1 + (n_iter // iter_eval))
for it in range(n_iter):
batch = [i % N for i in range(it * mb_size_x, (it + 1) * mb_size_x)]
X_b = X_train[batch, :]
y_b = y_train[batch]
_, loss_curr = sess.run([solver_KSD, ksd], feed_dict={Xs: X_b, ys: y_b, z: sample_z(mb_size, z_dim)})
ksd_loss[it] = loss_curr
if it % iter_eval == 0:
post = sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim)})
post_eval = evaluation(post)
acc[it // iter_eval] = post_eval[0]
loglik[it // iter_eval] = post_eval[1]
plt.plot(ksd)
plt.axvline(np.argmin(ksd_loss), color="r")
plt.title("KSD loss (min={:.04f} at iter {})".format(np.min(ksd_loss), np.argmin(ksd_loss)))
plt.show()
plt.close()
plt.plot(np.arange(len(acc)) * iter_eval, acc)
plt.ylim(top=0.8)
plt.axhline(0.75, color="g")
plt.title("Accuracy (max={:0.4f} at iter {})".format(np.max(acc), np.argmax(acc)*iter_eval))
plt.show()
plt.close()
|
flexible
|
{
"blob_id": "a0a9527268fb5f8ea24de700f7700b874fbf4a6b",
"index": 4838,
"step-1": "<mask token>\n\n\ndef S_q(theta, a0=1, b0=0.01):\n w = theta[:, :-1]\n s = tf.reshape(theta[:, -1], shape=[-1, 1])\n y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))\n y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])\n dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)\n dw_prior = -s ** 2 * w\n dw = dw_data * N / mb_size_x + dw_prior\n w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])\n ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)\n return tf.concat([dw, ds], axis=1)\n\n\ndef rbf_kernel(x, dim=X_dim, h=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = tf.exp(-pdist / h ** 2 / 2.0)\n sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)\n dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2\n dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4\n return kxy, dxkxy, dxykxy_tr\n\n\ndef imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = (c + pdist) ** beta\n coeff = 2 * beta * (c + pdist) ** (beta - 1)\n dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.\n reduce_sum(coeff, axis=1), 1))\n dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +\n (-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)\n return kxy, dxkxy, dxykxy_tr\n\n\n<mask token>\n\n\ndef ksd_emp(x, ap=1, dim=X_dim):\n sq = S_q(x, ap)\n kxy, dxkxy, dxykxy_tr = Kernel(x, dim)\n t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr\n t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))\n n = tf.cast(tf.shape(x)[0], tf.float64)\n ksd = (tf.reduce_sum(t13) + t2) / n ** 2\n return 
ksd\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample_z(m, n, sd=10.0):\n return np.random.normal(0, sd, size=[m, n])\n\n\ndef S_q(theta, a0=1, b0=0.01):\n w = theta[:, :-1]\n s = tf.reshape(theta[:, -1], shape=[-1, 1])\n y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))\n y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])\n dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)\n dw_prior = -s ** 2 * w\n dw = dw_data * N / mb_size_x + dw_prior\n w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])\n ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)\n return tf.concat([dw, ds], axis=1)\n\n\ndef rbf_kernel(x, dim=X_dim, h=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = tf.exp(-pdist / h ** 2 / 2.0)\n sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)\n dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2\n dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4\n return kxy, dxkxy, dxykxy_tr\n\n\ndef imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = (c + pdist) ** beta\n coeff = 2 * beta * (c + pdist) ** (beta - 1)\n dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.\n reduce_sum(coeff, axis=1), 1))\n dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +\n (-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)\n return kxy, dxkxy, dxykxy_tr\n\n\n<mask token>\n\n\ndef ksd_emp(x, ap=1, dim=X_dim):\n sq = S_q(x, ap)\n kxy, dxkxy, dxykxy_tr = Kernel(x, dim)\n t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr\n t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))\n n = 
tf.cast(tf.shape(x)[0], tf.float64)\n ksd = (tf.reduce_sum(t13) + t2) / n ** 2\n return ksd\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sample_z(m, n, sd=10.0):\n return np.random.normal(0, sd, size=[m, n])\n\n\ndef S_q(theta, a0=1, b0=0.01):\n w = theta[:, :-1]\n s = tf.reshape(theta[:, -1], shape=[-1, 1])\n y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))\n y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])\n dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)\n dw_prior = -s ** 2 * w\n dw = dw_data * N / mb_size_x + dw_prior\n w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])\n ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)\n return tf.concat([dw, ds], axis=1)\n\n\ndef rbf_kernel(x, dim=X_dim, h=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = tf.exp(-pdist / h ** 2 / 2.0)\n sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)\n dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2\n dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4\n return kxy, dxkxy, dxykxy_tr\n\n\ndef imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = (c + pdist) ** beta\n coeff = 2 * beta * (c + pdist) ** (beta - 1)\n dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.\n reduce_sum(coeff, axis=1), 1))\n dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +\n (-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)\n return kxy, dxkxy, dxykxy_tr\n\n\n<mask token>\n\n\ndef ksd_emp(x, ap=1, dim=X_dim):\n sq = S_q(x, ap)\n kxy, dxkxy, dxykxy_tr = Kernel(x, dim)\n t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr\n t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))\n n = 
tf.cast(tf.shape(x)[0], tf.float64)\n ksd = (tf.reduce_sum(t13) + t2) / n ** 2\n return ksd\n\n\ndef generator(z):\n G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)\n G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)\n out = 10.0 * tf.matmul(G_h2, G_W3) + G_b3\n return out\n\n\ndef evaluation(theta, X_t=X_test, y_t=y_test):\n w = theta[:, :-1]\n y = y_t.reshape([-1, 1])\n coff = -np.matmul(y * X_t, w.T)\n prob = np.mean(1.0 / (1 + np.exp(coff)), axis=1)\n acc = np.mean(prob > 0.5)\n llh = np.mean(np.log(prob))\n return acc, llh\n\n\n<mask token>\n",
"step-4": "<mask token>\ntf.reset_default_graph()\n<mask token>\n\n\ndef sample_z(m, n, sd=10.0):\n return np.random.normal(0, sd, size=[m, n])\n\n\ndef S_q(theta, a0=1, b0=0.01):\n w = theta[:, :-1]\n s = tf.reshape(theta[:, -1], shape=[-1, 1])\n y_hat = 1.0 / (1.0 + tf.exp(-tf.matmul(Xs, tf.transpose(w))))\n y = tf.reshape((ys + 1.0) / 2.0, shape=[-1, 1])\n dw_data = tf.matmul(tf.transpose(y - y_hat), Xs)\n dw_prior = -s ** 2 * w\n dw = dw_data * N / mb_size_x + dw_prior\n w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1])\n ds = (2.0 * a0 - 2 + d) / s - tf.multiply(w2 + 2.0 * b0, s)\n return tf.concat([dw, ds], axis=1)\n\n\ndef rbf_kernel(x, dim=X_dim, h=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = tf.exp(-pdist / h ** 2 / 2.0)\n sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)\n dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / h ** 2\n dxykxy_tr = tf.multiply(dim * h ** 2 - pdist, kxy) / h ** 4\n return kxy, dxkxy, dxykxy_tr\n\n\ndef imq_kernel(x, dim=X_dim, beta=-0.5, c=1.0):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x\n )[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY)\n kxy = (c + pdist) ** beta\n coeff = 2 * beta * (c + pdist) ** (beta - 1)\n dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.\n reduce_sum(coeff, axis=1), 1))\n dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2), -2 * dim * c * beta +\n (-4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)\n return kxy, dxkxy, dxykxy_tr\n\n\n<mask token>\n\n\ndef ksd_emp(x, ap=1, dim=X_dim):\n sq = S_q(x, ap)\n kxy, dxkxy, dxykxy_tr = Kernel(x, dim)\n t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr\n t2 = 2 * tf.trace(tf.matmul(sq, 
tf.transpose(dxkxy)))\n n = tf.cast(tf.shape(x)[0], tf.float64)\n ksd = (tf.reduce_sum(t13) + t2) / n ** 2\n return ksd\n\n\ndef generator(z):\n G_h1 = tf.nn.tanh(tf.matmul(z, G_W1) + G_b1)\n G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)\n out = 10.0 * tf.matmul(G_h2, G_W3) + G_b3\n return out\n\n\ndef evaluation(theta, X_t=X_test, y_t=y_test):\n w = theta[:, :-1]\n y = y_t.reshape([-1, 1])\n coff = -np.matmul(y * X_t, w.T)\n prob = np.mean(1.0 / (1 + np.exp(coff)), axis=1)\n acc = np.mean(prob > 0.5)\n llh = np.mean(np.log(prob))\n return acc, llh\n\n\n<mask token>\nsess.run(tf.global_variables_initializer())\n<mask token>\nfor it in range(n_iter):\n batch = [(i % N) for i in range(it * mb_size_x, (it + 1) * mb_size_x)]\n X_b = X_train[batch, :]\n y_b = y_train[batch]\n _, loss_curr = sess.run([solver_KSD, ksd], feed_dict={Xs: X_b, ys: y_b,\n z: sample_z(mb_size, z_dim)})\n ksd_loss[it] = loss_curr\n if it % iter_eval == 0:\n post = sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim)})\n post_eval = evaluation(post)\n acc[it // iter_eval] = post_eval[0]\n loglik[it // iter_eval] = post_eval[1]\nplt.plot(ksd)\nplt.axvline(np.argmin(ksd_loss), color='r')\nplt.title('KSD loss (min={:.04f} at iter {})'.format(np.min(ksd_loss), np.\n argmin(ksd_loss)))\nplt.show()\nplt.close()\nplt.plot(np.arange(len(acc)) * iter_eval, acc)\nplt.ylim(top=0.8)\nplt.axhline(0.75, color='g')\nplt.title('Accuracy (max={:0.4f} at iter {})'.format(np.max(acc), np.argmax\n (acc) * iter_eval))\nplt.show()\nplt.close()\n",
"step-5": "\"\"\"\n\nSteinNS: BayesianLogisticRegression_KSD.py\n\nCreated on 10/9/18 6:25 PM\n\n@author: Hanxi Sun\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport scipy.io\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n\n########################################################################################################################\n# Data\n\ndata = scipy.io.loadmat(\"data/covertype.mat\")\nX_input = data['covtype'][:, 1:]\ny_input = data['covtype'][:, 0]\ny_input[y_input == 2] = -1\n\nN_all = X_input.shape[0]\nX_input = np.hstack([X_input, np.ones([N_all, 1])])\nd = X_input.shape[1]\nX_dim = d + 1 # dimension of the target distribution\n\n# split the data set into training and testing\nX_train, X_test, y_train, y_test = train_test_split(X_input, y_input, test_size=0.2, random_state=21)\nX_train_tf = tf.convert_to_tensor(X_train, dtype=tf.float64)\nX_test_tf = tf.convert_to_tensor(X_test, dtype=tf.float64)\ny_train_tf = tf.convert_to_tensor(y_train, dtype=tf.float64)\ny_test_tf = tf.convert_to_tensor(y_test, dtype=tf.float64)\n\nN = X_train.shape[0]\n\n\n########################################################################################################################\n# model parameters\n\nlr = 4e-4 # learning rate\nkernel = \"rbf\" # \"rbf\" or \"imq\" kernel\n\nz_dim = 100\nh_dim_g = 200\n\nmb_size_x = 100 # date mini-batch size\nmb_size = 100 # sample mini-batch size\nn_iter = 200000\niter_eval = 1000\n\noptimizer = tf.train.RMSPropOptimizer\n\n\n########################################################################################################################\n# network\ntf.reset_default_graph()\n\ninitializer = tf.contrib.layers.xavier_initializer()\n\nXs = tf.placeholder(tf.float64, shape=[None, d])\nys = tf.placeholder(tf.float64, shape=[None])\nz = tf.placeholder(tf.float64, shape=[None, z_dim])\n\nG_W1 = tf.get_variable('g_w1', [z_dim, h_dim_g], dtype=tf.float64, 
initializer=initializer)\nG_b1 = tf.get_variable('g_b1', [h_dim_g], dtype=tf.float64, initializer=initializer)\nG_W2 = tf.get_variable('g_w2', [h_dim_g, h_dim_g], dtype=tf.float64, initializer=initializer)\nG_b2 = tf.get_variable('g_b2', [h_dim_g], dtype=tf.float64, initializer=initializer)\nG_W3 = tf.get_variable('g_w3', [h_dim_g, X_dim], dtype=tf.float64, initializer=initializer)\nG_b3 = tf.get_variable('g_b3', [X_dim], dtype=tf.float64, initializer=initializer)\n\ntheta_G = [G_W1, G_b1, G_W2, G_b2, G_W3, G_b3]\n\n\n########################################################################################################################\n# functions & structures\n\ndef sample_z(m, n, sd=10.):\n return np.random.normal(0, sd, size=[m, n])\n\n\ndef S_q(theta, a0=1, b0=0.01):\n # Reference:\n # https://github.com/DartML/Stein-Variational-Gradient-Descent/blob/master/python/bayesian_logistic_regression.py\n\n w = theta[:, :-1] # (m, d)\n s = tf.reshape(theta[:, -1], shape=[-1, 1]) # (m, 1); alpha = s**2\n\n y_hat = 1. / (1. + tf.exp(- tf.matmul(Xs, tf.transpose(w)))) # (mx, m); shape(Xs) = (mx, d)\n y = tf.reshape((ys + 1.) / 2., shape=[-1, 1]) # (mx, 1)\n\n dw_data = tf.matmul(tf.transpose(y - y_hat), Xs) # (m, d)\n dw_prior = - s**2 * w # (m, d)\n dw = dw_data * N / mb_size_x + dw_prior # (m, d)\n\n w2 = tf.reshape(tf.reduce_sum(tf.square(w), axis=1), shape=[-1, 1]) # (m, 1); = wtw\n ds = (2. * a0 - 2 + d) / s - tf.multiply(w2 + 2. 
* b0, s) # (m, 1)\n\n return tf.concat([dw, ds], axis=1)\n\n\ndef rbf_kernel(x, dim=X_dim, h=1.):\n # Reference 1: https://github.com/ChunyuanLI/SVGD/blob/master/demo_svgd.ipynb\n # Reference 2: https://github.com/yc14600/svgd/blob/master/svgd.py\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY) # pairwise distance matrix\n\n kxy = tf.exp(- pdist / h ** 2 / 2.0) # kernel matrix\n\n sum_kxy = tf.expand_dims(tf.reduce_sum(kxy, axis=1), 1)\n dxkxy = tf.add(-tf.matmul(kxy, x), tf.multiply(x, sum_kxy)) / (h ** 2) # sum_y dk(x, y)/dx\n\n dxykxy_tr = tf.multiply((dim * (h**2) - pdist), kxy) / (h**4) # tr( dk(x, y)/dxdy )\n\n return kxy, dxkxy, dxykxy_tr\n\n\ndef imq_kernel(x, dim=X_dim, beta=-.5, c=1.):\n XY = tf.matmul(x, tf.transpose(x))\n X2_ = tf.reshape(tf.reduce_sum(tf.square(x), axis=1), shape=[tf.shape(x)[0], 1])\n X2 = tf.tile(X2_, [1, tf.shape(x)[0]])\n pdist = tf.subtract(tf.add(X2, tf.transpose(X2)), 2 * XY) # pairwise distance matrix\n\n kxy = (c + pdist) ** beta\n\n coeff = 2 * beta * ((c + pdist) ** (beta-1))\n dxkxy = tf.matmul(coeff, x) - tf.multiply(x, tf.expand_dims(tf.reduce_sum(coeff, axis=1), 1))\n\n dxykxy_tr = tf.multiply((c + pdist) ** (beta - 2),\n - 2 * dim * c * beta + (- 4 * beta ** 2 + (4 - 2 * dim) * beta) * pdist)\n\n return kxy, dxkxy, dxykxy_tr\n\n\nkernels = {\"rbf\": rbf_kernel,\n \"imq\": imq_kernel}\n\nKernel = kernels[kernel]\n\n\ndef ksd_emp(x, ap=1, dim=X_dim):\n sq = S_q(x, ap)\n kxy, dxkxy, dxykxy_tr = Kernel(x, dim)\n t13 = tf.multiply(tf.matmul(sq, tf.transpose(sq)), kxy) + dxykxy_tr\n t2 = 2 * tf.trace(tf.matmul(sq, tf.transpose(dxkxy)))\n n = tf.cast(tf.shape(x)[0], tf.float64)\n\n # ksd = (tf.reduce_sum(t13) - tf.trace(t13) + t2) / (n * (n-1))\n ksd = (tf.reduce_sum(t13) + t2) / (n ** 2)\n\n return ksd\n\n\ndef generator(z):\n G_h1 = tf.nn.tanh(tf.matmul(z, 
G_W1) + G_b1)\n G_h2 = tf.nn.tanh(tf.matmul(G_h1, G_W2) + G_b2)\n out = 10. * tf.matmul(G_h2, G_W3) + G_b3\n return out\n\n\ndef evaluation(theta, X_t=X_test, y_t=y_test):\n w = theta[:, :-1]\n y = y_t.reshape([-1, 1])\n coff = - np.matmul(y * X_t, w.T)\n prob = np.mean(1. / (1 + np.exp(coff)), axis=1)\n acc = np.mean(prob > .5)\n llh = np.mean(np.log(prob))\n return acc, llh\n\n\nG_sample = generator(z)\n\nksd = ksd_emp(G_sample)\nsolver_KSD = optimizer(learning_rate=lr).minimize(ksd, var_list=theta_G)\n\n\n#######################################################################################################################\n\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nksd_loss = np.zeros(n_iter)\nacc = np.zeros(1 + (n_iter // iter_eval))\nloglik = np.zeros(1 + (n_iter // iter_eval))\n\nfor it in range(n_iter):\n batch = [i % N for i in range(it * mb_size_x, (it + 1) * mb_size_x)]\n\n X_b = X_train[batch, :]\n y_b = y_train[batch]\n\n _, loss_curr = sess.run([solver_KSD, ksd], feed_dict={Xs: X_b, ys: y_b, z: sample_z(mb_size, z_dim)})\n\n ksd_loss[it] = loss_curr\n\n if it % iter_eval == 0:\n post = sess.run(G_sample, feed_dict={z: sample_z(mb_size, z_dim)})\n post_eval = evaluation(post)\n acc[it // iter_eval] = post_eval[0]\n loglik[it // iter_eval] = post_eval[1]\n\nplt.plot(ksd)\nplt.axvline(np.argmin(ksd_loss), color=\"r\")\nplt.title(\"KSD loss (min={:.04f} at iter {})\".format(np.min(ksd_loss), np.argmin(ksd_loss)))\nplt.show()\nplt.close()\n\n\nplt.plot(np.arange(len(acc)) * iter_eval, acc)\nplt.ylim(top=0.8)\nplt.axhline(0.75, color=\"g\")\nplt.title(\"Accuracy (max={:0.4f} at iter {})\".format(np.max(acc), np.argmax(acc)*iter_eval))\nplt.show()\nplt.close()\n\n\n\n",
"step-ids": [
4,
5,
7,
8,
11
]
}
|
[
4,
5,
7,
8,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('api_rest', '0004_auto_20200828_0749')]
operations = [migrations.RemoveField(model_name='event', name='user_id'
), migrations.AddField(model_name='event', name='users', field=
models.ManyToManyField(db_table='user_event', related_name='users',
to='api_rest.UserE'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('api_rest', '0004_auto_20200828_0749')]
operations = [migrations.RemoveField(model_name='event', name='user_id'
), migrations.AddField(model_name='event', name='users', field=
models.ManyToManyField(db_table='user_event', related_name='users',
to='api_rest.UserE'))]
<|reserved_special_token_1|>
# Generated by Django 3.1 on 2020-08-28 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_rest', '0004_auto_20200828_0749'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='user_id',
),
migrations.AddField(
model_name='event',
name='users',
field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),
),
]
|
flexible
|
{
"blob_id": "bfd8385e8f4886b91dde59c04785134b9cd6a2b6",
"index": 3893,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api_rest', '0004_auto_20200828_0749')]\n operations = [migrations.RemoveField(model_name='event', name='user_id'\n ), migrations.AddField(model_name='event', name='users', field=\n models.ManyToManyField(db_table='user_event', related_name='users',\n to='api_rest.UserE'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api_rest', '0004_auto_20200828_0749')]\n operations = [migrations.RemoveField(model_name='event', name='user_id'\n ), migrations.AddField(model_name='event', name='users', field=\n models.ManyToManyField(db_table='user_event', related_name='users',\n to='api_rest.UserE'))]\n",
"step-5": "# Generated by Django 3.1 on 2020-08-28 14:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api_rest', '0004_auto_20200828_0749'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='event',\n name='user_id',\n ),\n migrations.AddField(\n model_name='event',\n name='users',\n field=models.ManyToManyField(db_table='user_event', related_name='users', to='api_rest.UserE'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import abc
import glob
import pathlib
import numpy as np
import torch
from tensorboardX import SummaryWriter
import time
import os
import matplotlib.pyplot as plt
from torch import nn
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_device_async
from fastspeech.utils.nvtx import Nvtx
from fastspeech.utils.fp16 import cast_model_to_half
import torch.cuda.profiler as profiler
from fastspeech.utils.logging import tprint
from fastspeech.utils.time import TimeElapsed
# Use a non-interactive backend so figures can be rendered on headless machines.
plt.switch_backend('Agg')
class Trainer(object):
    """Generic single-process training loop.

    Responsibilities:
      * seed numpy/torch RNGs
      * run the optimization loop up to ``final_steps`` steps (or until
        ``n_epochs`` passes over the data loader are exhausted)
      * save/load checkpoints
      * console + TensorBoard logging
      * optional apex AMP (O1), nvprof/pyprof profiling window, DataParallel

    Subclasses must implement :meth:`loss`.
    """

    def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):
        """
        Args:
            data_loader: iterable yielding training batches; re-iterated for
                ``n_epochs`` passes (forever when ``n_epochs`` is None).
            model_name: name used in log messages and the checkpoint subdir.
            model: torch module to train.
            optimizer_fn: callable ``model -> optimizer``.
            final_steps: last global step (inclusive) of training.
            lr_scheduler_fn: optional callable ``optimizer -> lr scheduler``.
            step: global step to resume counting from (overwritten by a
                loaded checkpoint, if one exists).
            ckpt_path: root directory for checkpoints; None disables saving.
            log_path: root directory for TensorBoard logs; None disables them.
            n_epochs: number of passes over ``data_loader``; None = unlimited.
            save_steps: checkpoint every N steps (None disables).
            log_steps: log every N steps.
            device: device the model is moved to.
            use_amp: enable apex automatic mixed precision (opt level O1).
            nvprof_iter_start: iteration at which cudaProfiler starts.
            nvprof_iter_end: iteration at which cudaProfiler stops.
            pyprof_enabled: emit NVTX ranges for pyprof op annotation.
            detect_anomaly: enable autograd anomaly detection (slow; debug).
            seed: RNG seed; a random one is drawn when None.
        """
        self.data_loader = data_loader
        self.model_name = model_name
        self.model = model
        self.n_epochs = n_epochs
        self.save_steps = save_steps
        self.log_steps = log_steps
        self.ckpt_path = ckpt_path
        self.log_path = log_path
        self.final_steps = final_steps
        self.step = step
        self.device = device
        self.use_amp = use_amp
        self.nvprof_iter_start = nvprof_iter_start
        self.nvprof_iter_end = nvprof_iter_end
        self.pyprof_enabled = pyprof_enabled
        self.detect_anomaly = detect_anomaly

        # model
        self.model.train()
        to_device_async(self.model, self.device)
        num_param = sum(param.numel() for param in model.parameters())
        tprint('The number of {} parameters: {}'.format(
            self.model_name, num_param))

        # optimizer
        self.optimizer = optimizer_fn(model)

        # lr scheduler (stepped once per training step, not per epoch)
        if lr_scheduler_fn:
            self.lr_scheduler = lr_scheduler_fn(self.optimizer)
        else:
            self.lr_scheduler = None

        # automatic mixed precision (must wrap model/optimizer before DataParallel)
        if self.use_amp:
            from apex import amp
            self.model, self.optimizer = amp.initialize(self.model,
                                                        self.optimizer,
                                                        opt_level='O1')

        # profile: pyprof NVTX annotation only makes sense inside an nvprof window
        if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:
            from apex import pyprof
            pyprof.nvtx.init()

        # data parallel
        self.model = nn.DataParallel(self.model)

        # set seed
        if seed is None:
            seed = np.random.randint(2**16)
        np.random.seed(seed)
        torch.manual_seed(seed)

        # data loader: endless (or n_epochs-bounded) iterator over batches
        self.data_loader_iter = self.repeat(self.data_loader, n_epochs)

        # logging
        if log_path:
            # tensorboard log path : {log_path}/YYYYMMDD-HHMMMSS
            log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))
            self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)

        # checkpoint path
        if self.ckpt_path:
            self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)
            pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)

            # load checkpoint
            self.load()

    def train(self):
        """Run the main optimization loop until ``final_steps`` or n_epochs."""
        try:
            with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):
                # Fix: ``timer`` was previously unbound unless the
                # nvprof_iter_start branch fired, so hitting nvprof_iter_end
                # alone raised NameError. Initialize it up front and guard.
                timer = None
                for i in range(self.step+1, self.final_steps + 1):
                    self.step = i
                    tprint("------------- TRAIN step : {} -------------".format(i))

                    if self.nvprof_iter_start and i == self.nvprof_iter_start:
                        profiler.start()
                        timer = TimeElapsed(name="Training time during profiling", format=":.6f")
                        timer.start()

                    with Nvtx("step #{}".format(self.step)):
                        loss, meta = self.do_step()

                    if self.nvprof_iter_end and i == self.nvprof_iter_end:
                        profiler.stop()
                        if timer is not None:
                            timer.end()

                    if self.lr_scheduler:
                        for param_group in self.optimizer.param_groups:
                            tprint("lr: {:06f}".format(param_group['lr']))
                        self.lr_scheduler.step(self.step)

                    if self.step % self.log_steps == 0:
                        self.log(loss, meta)

                    if self.ckpt_path and self.save_steps and i % self.save_steps == 0:
                        self.save()

            tprint("Training has been done.")
        except StopIteration:  # raised by the data iterator once n_epochs is reached
            tprint("Training has been done. (by n_epochs)")
        except KeyboardInterrupt:
            tprint("Training has been canceled.")

    @abc.abstractmethod
    def loss(self, inputs, model):
        """Compute the training loss for one batch; subclasses must override.

        Args:
            inputs: one batch as produced by ``data_loader``.
            model: the (DataParallel-wrapped) model.

        Returns:
            (loss, meta): scalar loss tensor and a dict of extra metrics.
        """
        # Fix: the original raised ``NotImplemented`` (a constant, not an
        # exception class), which itself triggers a confusing TypeError.
        raise NotImplementedError

    def do_step(self):
        """Run one optimization step: fetch batch, forward, backward, update.

        Returns:
            (loss, meta) as produced by :meth:`loss`.
        """
        with Nvtx("data load", enabled=False):
            data = next(self.data_loader_iter)

        with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):
            with Nvtx("forward"):
                loss, meta = self.loss(data, self.model)
            self.optimizer.zero_grad()
            with Nvtx("backward"):
                if self.use_amp:
                    from apex import amp
                    # scale the loss to avoid fp16 gradient underflow
                    with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()
        with Nvtx("weight update"):
            self.optimizer.step()

        return loss, meta

    def log(self, loss, meta):
        """Emit console logging and, when log_path is set, TensorBoard logging."""
        self.console_log('train', loss, meta)
        if self.log_path:
            self.tensorboard_log('train', loss)

    def save(self):
        """Write step/model/optimizer state to ckpt_path/checkpoint_NNNNNN.pt."""
        state_dict = {
            'step': self.step,
            'model': self.model.state_dict(),
            'optim': self.optimizer.state_dict(),
        }
        torch.save(state_dict, self.ckpt_path +
                   '/checkpoint_{:06d}.pt'.format(self.step))

        tprint('[Save] Model "{}". Step={}.'.format(
            self.model_name, self.step))

    def load(self, load_optim=True):
        """Restore the most recently created checkpoint in ckpt_path, if any.

        Args:
            load_optim: also restore the optimizer state.
        """
        files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))
        if files_exist:
            # load the latest created file.
            latest_file = max(files_exist, key=os.path.getctime)
            state_dict = torch.load(latest_file)
            self.step = state_dict['step']
            self.model.load_state_dict(state_dict['model'])
            if load_optim:
                self.optimizer.load_state_dict(state_dict['optim'])
            tprint('[Load] Checkpoint \'{}\'. Step={}'.format(
                latest_file, self.step))
        else:
            tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))

    def console_log(self, tag, loss, meta):
        """Print loss and meta metrics to the console.

        Note: ``tag`` is currently unused here; the parameter is kept for
        signature symmetry with :meth:`tensorboard_log`.
        """
        msg = 'loss: {:.6f}'.format(loss)
        for key, value in meta.items():
            msg += ',\t{}: {:.4f}'.format(key, value)
        tprint(msg)

    def tensorboard_log(self, tag, loss):
        """Write the scalar loss under '<tag>/loss' at the current global step."""
        self.tbwriter.add_scalar(
            '{}/loss'.format(tag), loss, global_step=self.step)

    @staticmethod
    def repeat(iterable, n_repeat=None):
        """Yield items from ``iterable`` repeatedly.

        Makes ``n_repeat`` full passes, or loops forever when ``n_repeat``
        is None. Exhaustion surfaces as StopIteration at the caller's
        ``next()``, which :meth:`train` catches to end training by epochs.
        """
        cnt = 0
        while n_repeat is None or cnt < n_repeat:
            for x in iterable:
                yield x
            cnt += 1
        # (the original's ``return StopIteration()`` only set an unused
        # generator return value; plain exhaustion behaves identically)
|
normal
|
{
"blob_id": "9fa534664056a8cf9e9a64ccc7d6dd4de2ec0936",
"index": 1514,
"step-1": "<mask token>\n\n\nclass Trainer(object):\n <mask token>\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n 
pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n <mask token>\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n <mask token>\n <mask token>\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-2": "<mask token>\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n 
self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. 
(by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. 
Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-3": "<mask token>\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if 
self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. 
(by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. 
Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-4": "import abc\nimport glob\nimport pathlib\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport os\nimport matplotlib.pyplot as plt\nfrom torch import nn\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.pytorch import to_device_async\nfrom fastspeech.utils.nvtx import Nvtx\nfrom fastspeech.utils.fp16 import cast_model_to_half\nimport torch.cuda.profiler as profiler\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.time import TimeElapsed\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn,\n final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path\n =None, n_epochs=None, save_steps=None, log_steps=10, device='cuda',\n use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None,\n pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n self.detect_anomaly = detect_anomaly\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(self.model_name,\n num_param))\n self.optimizer = optimizer_fn(model)\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, self.\n optimizer, 
opt_level='O1')\n if (nvprof_iter_start and nvprof_iter_end is not None and\n pyprof_enabled):\n from apex import pyprof\n pyprof.nvtx.init()\n self.model = nn.DataParallel(self.model)\n if seed is None:\n seed = np.random.randint(2 ** 16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n if log_path:\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled\n ):\n for i in range(self.step + 1, self.final_steps + 1):\n self.step = i\n tprint('------------- TRAIN step : {} -------------'.\n format(i))\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\n 'Training time during profiling', format=':.6f')\n timer.start()\n with Nvtx('step #{}'.format(self.step)):\n loss, meta = self.do_step()\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n profiler.stop()\n timer.end()\n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint('lr: {:06f}'.format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n if (self.ckpt_path and self.save_steps and i % self.\n save_steps == 0):\n self.save()\n tprint('Training has been done.')\n except StopIteration:\n tprint('Training has been done. 
(by n_epochs)')\n except KeyboardInterrupt:\n tprint('Training has been canceled.')\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx('data load', enabled=False):\n data = next(self.data_loader_iter)\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx('forward'):\n loss, meta = self.loss(data, self.model)\n self.optimizer.zero_grad()\n with Nvtx('backward'):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n with Nvtx('weight update'):\n self.optimizer.step()\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {'step': self.step, 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict()}\n torch.save(state_dict, self.ckpt_path + '/checkpoint_{:06d}.pt'.\n format(self.step))\n tprint('[Save] Model \"{}\". Step={}.'.format(self.model_name, self.step)\n )\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n tprint(\"[Load] Checkpoint '{}'. Step={}\".format(latest_file,\n self.step))\n else:\n tprint('No checkpoints in {}. 
Load skipped.'.format(self.ckpt_path)\n )\n\n def console_log(self, tag, loss, meta):\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar('{}/loss'.format(tag), loss, global_step=\n self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-5": "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the NVIDIA CORPORATION nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport abc\nimport glob\nimport pathlib\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\nimport time\nimport os\nimport matplotlib.pyplot as plt\nfrom torch import nn\n\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.pytorch import to_device_async\nfrom fastspeech.utils.nvtx import Nvtx\nfrom fastspeech.utils.fp16 import cast_model_to_half\n\nimport torch.cuda.profiler as profiler\nfrom fastspeech.utils.logging import tprint\nfrom fastspeech.utils.time import TimeElapsed\n\nplt.switch_backend('Agg')\n\n\nclass Trainer(object):\n \"\"\"\n set seed\n set n_epochs, n_steps\n save/load model\n validation\n logging\n distributed\n \"\"\"\n\n def __init__(self, data_loader, model_name, model, optimizer_fn, final_steps, lr_scheduler_fn=None, step=0, ckpt_path=None, log_path=None, n_epochs=None, save_steps=None, log_steps=10, device='cuda', use_amp=False, nvprof_iter_start=None, nvprof_iter_end=None, pyprof_enabled=False, detect_anomaly=False, seed=None):\n self.data_loader = data_loader\n self.model_name = model_name\n self.model = model\n self.n_epochs = n_epochs\n self.save_steps = save_steps\n self.log_steps = log_steps\n self.ckpt_path = ckpt_path\n self.log_path = log_path\n self.final_steps = final_steps\n self.step = step\n self.device = device\n self.use_amp = use_amp\n self.nvprof_iter_start = nvprof_iter_start\n self.nvprof_iter_end = nvprof_iter_end\n self.pyprof_enabled = pyprof_enabled\n 
self.detect_anomaly = detect_anomaly\n\n # model\n self.model.train()\n to_device_async(self.model, self.device)\n num_param = sum(param.numel() for param in model.parameters())\n tprint('The number of {} parameters: {}'.format(\n self.model_name, num_param))\n\n # optimizer\n self.optimizer = optimizer_fn(model)\n\n # lr scheduler\n if lr_scheduler_fn:\n self.lr_scheduler = lr_scheduler_fn(self.optimizer)\n else:\n self.lr_scheduler = None\n\n # automatic mixed precision\n if self.use_amp:\n from apex import amp\n self.model, self.optimizer = amp.initialize(self.model, \n self.optimizer, \n opt_level='O1')\n\n # profile\n if nvprof_iter_start and nvprof_iter_end is not None and pyprof_enabled:\n from apex import pyprof\n pyprof.nvtx.init()\n\n # data parallel\n self.model = nn.DataParallel(self.model)\n\n # set seed\n if seed is None:\n seed = np.random.randint(2**16)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n # data loader\n self.data_loader_iter = self.repeat(self.data_loader, n_epochs)\n\n # logging\n if log_path:\n # tensorboard log path : {log_path}/YYYYMMDD-HHMMMSS\n log_path = os.path.join(log_path, time.strftime('%Y%m%d-%H%M%S'))\n self.tbwriter = SummaryWriter(log_dir=log_path, flush_secs=10)\n\n # checkpoint path\n if self.ckpt_path:\n self.ckpt_path = os.path.join(self.ckpt_path, self.model_name)\n pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True)\n\n # load checkpoint\n self.load()\n\n def train(self):\n try:\n with torch.autograd.profiler.emit_nvtx(enabled=self.pyprof_enabled):\n for i in range(self.step+1, self.final_steps + 1):\n self.step = i\n tprint(\"------------- TRAIN step : {} -------------\".format(i))\n\n if self.nvprof_iter_start and i == self.nvprof_iter_start:\n profiler.start()\n timer = TimeElapsed(name=\"Training time during profiling\", format=\":.6f\")\n timer.start()\n\n with Nvtx(\"step #{}\".format(self.step)):\n loss, meta = self.do_step()\n\n if self.nvprof_iter_end and i == self.nvprof_iter_end:\n 
profiler.stop()\n timer.end()\n \n if self.lr_scheduler:\n for param_group in self.optimizer.param_groups:\n tprint(\"lr: {:06f}\".format(param_group['lr']))\n self.lr_scheduler.step(self.step)\n\n if self.step % self.log_steps == 0:\n self.log(loss, meta)\n\n if self.ckpt_path and self.save_steps and i % self.save_steps == 0:\n self.save()\n\n tprint(\"Training has been done.\")\n except StopIteration: # done by n_epochs\n tprint(\"Training has been done. (by n_epochs)\")\n except KeyboardInterrupt:\n tprint(\"Training has been canceled.\")\n\n @abc.abstractmethod\n def loss(self, inputs, model):\n raise NotImplemented\n\n def do_step(self):\n with Nvtx(\"data load\", enabled=False):\n data = next(self.data_loader_iter)\n\n with torch.autograd.set_detect_anomaly(mode=self.detect_anomaly):\n with Nvtx(\"forward\"):\n loss, meta = self.loss(data, self.model)\n \n self.optimizer.zero_grad()\n\n with Nvtx(\"backward\"):\n if self.use_amp:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n with Nvtx(\"weight update\"):\n self.optimizer.step()\n\n return loss, meta\n\n def log(self, loss, meta):\n self.console_log('train', loss, meta)\n if self.log_path:\n self.tensorboard_log('train', loss)\n\n def save(self):\n state_dict = {\n 'step': self.step,\n 'model': self.model.state_dict(),\n 'optim': self.optimizer.state_dict(),\n }\n torch.save(state_dict, self.ckpt_path +\n '/checkpoint_{:06d}.pt'.format(self.step))\n\n tprint('[Save] Model \"{}\". 
Step={}.'.format(\n self.model_name, self.step))\n\n def load(self, load_optim=True):\n files_exist = glob.glob(os.path.join(self.ckpt_path, '*'))\n if files_exist:\n # load the latest created file.\n latest_file = max(files_exist, key=os.path.getctime)\n state_dict = torch.load(latest_file)\n\n self.step = state_dict['step']\n self.model.load_state_dict(state_dict['model'])\n if load_optim:\n self.optimizer.load_state_dict(state_dict['optim'])\n\n tprint('[Load] Checkpoint \\'{}\\'. Step={}'.format(\n latest_file, self.step))\n else:\n tprint('No checkpoints in {}. Load skipped.'.format(self.ckpt_path))\n\n def console_log(self, tag, loss, meta):\n # console logging\n msg = 'loss: {:.6f}'.format(loss)\n for key, value in meta.items():\n msg += ',\\t{}: {:.4f}'.format(key, value)\n tprint(msg)\n\n def tensorboard_log(self, tag, loss):\n self.tbwriter.add_scalar(\n '{}/loss'.format(tag), loss, global_step=self.step)\n\n @staticmethod\n def repeat(iterable, n_repeat=None):\n cnt = 0\n while n_repeat is None or cnt < n_repeat:\n for x in iterable:\n yield x\n cnt += 1\n return StopIteration()\n",
"step-ids": [
8,
12,
13,
14,
15
]
}
|
[
8,
12,
13,
14,
15
] |
<|reserved_special_token_0|>
class UnknownCommand(Exception):
pass
<|reserved_special_token_0|>
class Tamagotchi:
def __init__(self) ->None:
self._age = 0
self._food_level = INITIAL_FOOD_LEVEL
self._energy_level = INITIAL_ENERGY_LEVEL
self._poop_level = INITIAL_POOP_LEVEL
self._is_awake = INITIAL_IS_AWAKE
self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}
def __repr__(self) ->str:
return (
f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'
)
def process_command(self, command: str) ->None:
try:
self._commands[command]()
except KeyError:
raise UnknownCommand(command)
def _feed(self) ->None:
if self._is_awake:
self._food_level = _add_and_clip(self._food_level,
FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)
def _clean(self) ->None:
self._poop_level = 0
def _sleep(self) ->None:
self._is_awake = False
def is_alive(self) ->bool:
return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL
def update(self) ->None:
self._age += 1
self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,
MAX_FOOD_LEVEL)
if self._energy_level >= MAX_ENERGY_LEVEL:
self._is_awake = True
if self._energy_level <= 0:
self._is_awake = False
energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else
ENERGY_PER_TICK_ASLEEP)
self._energy_level = _add_and_clip(self._energy_level, energy_delta,
0, MAX_ENERGY_LEVEL)
if self._age % TICKS_PER_POOP == 0:
self._poop_level += 1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnknownCommand(Exception):
pass
<|reserved_special_token_0|>
class Tamagotchi:
def __init__(self) ->None:
self._age = 0
self._food_level = INITIAL_FOOD_LEVEL
self._energy_level = INITIAL_ENERGY_LEVEL
self._poop_level = INITIAL_POOP_LEVEL
self._is_awake = INITIAL_IS_AWAKE
self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}
def __repr__(self) ->str:
return (
f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'
)
def process_command(self, command: str) ->None:
try:
self._commands[command]()
except KeyError:
raise UnknownCommand(command)
def _feed(self) ->None:
if self._is_awake:
self._food_level = _add_and_clip(self._food_level,
FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)
def _clean(self) ->None:
self._poop_level = 0
def _sleep(self) ->None:
self._is_awake = False
def is_alive(self) ->bool:
return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL
def update(self) ->None:
self._age += 1
self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,
MAX_FOOD_LEVEL)
if self._energy_level >= MAX_ENERGY_LEVEL:
self._is_awake = True
if self._energy_level <= 0:
self._is_awake = False
energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else
ENERGY_PER_TICK_ASLEEP)
self._energy_level = _add_and_clip(self._energy_level, energy_delta,
0, MAX_ENERGY_LEVEL)
if self._age % TICKS_PER_POOP == 0:
self._poop_level += 1
def main():
tamagotchi = Tamagotchi()
with NonBlockingKeyboard() as kb:
while True:
inpt = kb.getstr()
should_quit = False
for c in inpt:
try:
tamagotchi.process_command(c)
except UnknownCommand:
if c == 'q':
should_quit = True
break
else:
raise
if should_quit:
break
tamagotchi.update()
print(tamagotchi)
if not tamagotchi.is_alive():
print('tamagotchi died')
break
time.sleep(TICK_DURATION)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnknownCommand(Exception):
pass
def _add_and_clip(x, dx, x_min, x_max):
return max(x_min, min(x_max, x + dx))
class Tamagotchi:
def __init__(self) ->None:
self._age = 0
self._food_level = INITIAL_FOOD_LEVEL
self._energy_level = INITIAL_ENERGY_LEVEL
self._poop_level = INITIAL_POOP_LEVEL
self._is_awake = INITIAL_IS_AWAKE
self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}
def __repr__(self) ->str:
return (
f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'
)
def process_command(self, command: str) ->None:
try:
self._commands[command]()
except KeyError:
raise UnknownCommand(command)
def _feed(self) ->None:
if self._is_awake:
self._food_level = _add_and_clip(self._food_level,
FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)
def _clean(self) ->None:
self._poop_level = 0
def _sleep(self) ->None:
self._is_awake = False
def is_alive(self) ->bool:
return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL
def update(self) ->None:
self._age += 1
self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,
MAX_FOOD_LEVEL)
if self._energy_level >= MAX_ENERGY_LEVEL:
self._is_awake = True
if self._energy_level <= 0:
self._is_awake = False
energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else
ENERGY_PER_TICK_ASLEEP)
self._energy_level = _add_and_clip(self._energy_level, energy_delta,
0, MAX_ENERGY_LEVEL)
if self._age % TICKS_PER_POOP == 0:
self._poop_level += 1
def main():
tamagotchi = Tamagotchi()
with NonBlockingKeyboard() as kb:
while True:
inpt = kb.getstr()
should_quit = False
for c in inpt:
try:
tamagotchi.process_command(c)
except UnknownCommand:
if c == 'q':
should_quit = True
break
else:
raise
if should_quit:
break
tamagotchi.update()
print(tamagotchi)
if not tamagotchi.is_alive():
print('tamagotchi died')
break
time.sleep(TICK_DURATION)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnknownCommand(Exception):
pass
def _add_and_clip(x, dx, x_min, x_max):
return max(x_min, min(x_max, x + dx))
class Tamagotchi:
def __init__(self) ->None:
self._age = 0
self._food_level = INITIAL_FOOD_LEVEL
self._energy_level = INITIAL_ENERGY_LEVEL
self._poop_level = INITIAL_POOP_LEVEL
self._is_awake = INITIAL_IS_AWAKE
self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}
def __repr__(self) ->str:
return (
f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'
)
def process_command(self, command: str) ->None:
try:
self._commands[command]()
except KeyError:
raise UnknownCommand(command)
def _feed(self) ->None:
if self._is_awake:
self._food_level = _add_and_clip(self._food_level,
FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)
def _clean(self) ->None:
self._poop_level = 0
def _sleep(self) ->None:
self._is_awake = False
def is_alive(self) ->bool:
return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL
def update(self) ->None:
self._age += 1
self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,
MAX_FOOD_LEVEL)
if self._energy_level >= MAX_ENERGY_LEVEL:
self._is_awake = True
if self._energy_level <= 0:
self._is_awake = False
energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else
ENERGY_PER_TICK_ASLEEP)
self._energy_level = _add_and_clip(self._energy_level, energy_delta,
0, MAX_ENERGY_LEVEL)
if self._age % TICKS_PER_POOP == 0:
self._poop_level += 1
def main():
tamagotchi = Tamagotchi()
with NonBlockingKeyboard() as kb:
while True:
inpt = kb.getstr()
should_quit = False
for c in inpt:
try:
tamagotchi.process_command(c)
except UnknownCommand:
if c == 'q':
should_quit = True
break
else:
raise
if should_quit:
break
tamagotchi.update()
print(tamagotchi)
if not tamagotchi.is_alive():
print('tamagotchi died')
break
time.sleep(TICK_DURATION)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import time
from junk.keyboard_non_blocking import NonBlockingKeyboard
TICK_DURATION = 0.05
INITIAL_FOOD_LEVEL = 100
FOOD_PER_TICK = -1
FOOD_PER_FEED = 10
MAX_FOOD_LEVEL = 100
INITIAL_ENERGY_LEVEL = 50
ENERGY_PER_TICK_AWAKE = -1
ENERGY_PER_TICK_ASLEEP = 5
MAX_ENERGY_LEVEL = 100
INITIAL_IS_AWAKE = False
INITIAL_POOP_LEVEL = 0
TICKS_PER_POOP = 25
MAX_POOP_LEVEL = 10
class UnknownCommand(Exception):
    """Raised by Tamagotchi.process_command for a key with no registered handler."""
    pass
def _add_and_clip(x, dx, x_min, x_max):
return max(x_min, min(x_max, x + dx))
class Tamagotchi:
    """A minimal virtual pet: it eats, sleeps, poops and ages one tick at a time."""

    def __init__(self) -> None:
        self._age = 0
        self._food_level = INITIAL_FOOD_LEVEL
        self._energy_level = INITIAL_ENERGY_LEVEL
        self._poop_level = INITIAL_POOP_LEVEL
        self._is_awake = INITIAL_IS_AWAKE
        # Single-character keyboard commands mapped to their handlers.
        self._commands = {"f": self._feed, "c": self._clean, "s": self._sleep}

    def __repr__(self) -> str:
        return (
            f"Tamagotchi(is_awake={self._is_awake}, "
            f"food_level={self._food_level}, "
            f"energy_level={self._energy_level}, "
            f"poop_level={self._poop_level}, "
            f"age={self._age})"
        )

    def process_command(self, command: str) -> None:
        """Dispatch a one-character command; raise UnknownCommand if unmapped."""
        try:
            handler = self._commands[command]
        except KeyError:
            raise UnknownCommand(command)
        handler()

    def _feed(self) -> None:
        """Feed the pet. Ignored while asleep; food is capped at MAX_FOOD_LEVEL."""
        if not self._is_awake:
            return
        self._food_level = _add_and_clip(
            self._food_level, FOOD_PER_FEED, 0, MAX_FOOD_LEVEL
        )

    def _clean(self) -> None:
        """Remove all accumulated poop."""
        self._poop_level = 0

    def _sleep(self) -> None:
        """Put the pet to sleep; it wakes automatically at full energy."""
        self._is_awake = False

    def is_alive(self) -> bool:
        """Alive while some food remains and the poop cap has not been reached."""
        starved = self._food_level <= 0
        buried = self._poop_level >= MAX_POOP_LEVEL
        return not (starved or buried)

    def update(self) -> None:
        """Advance the simulation by one tick: food, then energy/sleep, then poop."""
        self._age += 1
        # Hunger grows every tick.
        self._food_level = _add_and_clip(
            self._food_level, FOOD_PER_TICK, 0, MAX_FOOD_LEVEL
        )
        # Wake at full energy, collapse at zero, then apply this tick's delta.
        if self._energy_level >= MAX_ENERGY_LEVEL:
            self._is_awake = True
        if self._energy_level <= 0:
            self._is_awake = False
        delta = ENERGY_PER_TICK_AWAKE if self._is_awake else ENERGY_PER_TICK_ASLEEP
        self._energy_level = _add_and_clip(
            self._energy_level, delta, 0, MAX_ENERGY_LEVEL
        )
        # Poop arrives on a fixed schedule.
        if self._age % TICKS_PER_POOP == 0:
            self._poop_level += 1
def main():
    """Interactive game loop: poll the keyboard, apply commands, advance one tick."""
    tamagotchi = Tamagotchi()
    with NonBlockingKeyboard() as kb:
        while True:
            # Non-blocking read of everything typed since the last tick.
            inpt = kb.getstr()
            should_quit = False
            for c in inpt:
                try:
                    tamagotchi.process_command(c)
                except UnknownCommand:
                    # "q" is not a pet command: it quits the game.
                    if c == "q":
                        should_quit = True
                        break
                    else:
                        raise
            if should_quit:
                break
            tamagotchi.update()
            print(tamagotchi)
            if not tamagotchi.is_alive():
                print("tamagotchi died")
                break
            # Fixed tick rate keeps the simulation speed constant.
            time.sleep(TICK_DURATION)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "1dd09a09f542099091d94d466ebd7cc149884eb4",
"index": 7385,
"step-1": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\n<mask token>\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\n<mask token>\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print('tamagotchi died')\n break\n 
time.sleep(TICK_DURATION)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n tamagotchi.update()\n print(tamagotchi)\n if not 
tamagotchi.is_alive():\n print('tamagotchi died')\n break\n time.sleep(TICK_DURATION)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n\n def __init__(self) ->None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {'f': self._feed, 'c': self._clean, 's': self._sleep}\n\n def __repr__(self) ->str:\n return (\n f'Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})'\n )\n\n def process_command(self, command: str) ->None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) ->None:\n if self._is_awake:\n self._food_level = _add_and_clip(self._food_level,\n FOOD_PER_FEED, 0, MAX_FOOD_LEVEL)\n\n def _clean(self) ->None:\n self._poop_level = 0\n\n def _sleep(self) ->None:\n self._is_awake = False\n\n def is_alive(self) ->bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) ->None:\n self._age += 1\n self._food_level = _add_and_clip(self._food_level, FOOD_PER_TICK, 0,\n MAX_FOOD_LEVEL)\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (ENERGY_PER_TICK_AWAKE if self._is_awake else\n ENERGY_PER_TICK_ASLEEP)\n self._energy_level = _add_and_clip(self._energy_level, energy_delta,\n 0, MAX_ENERGY_LEVEL)\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == 'q':\n should_quit = True\n break\n else:\n raise\n if should_quit:\n break\n tamagotchi.update()\n print(tamagotchi)\n if not 
tamagotchi.is_alive():\n print('tamagotchi died')\n break\n time.sleep(TICK_DURATION)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time\n\nfrom junk.keyboard_non_blocking import NonBlockingKeyboard\n\nTICK_DURATION = 0.05\n\nINITIAL_FOOD_LEVEL = 100\nFOOD_PER_TICK = -1\nFOOD_PER_FEED = 10\nMAX_FOOD_LEVEL = 100\n\nINITIAL_ENERGY_LEVEL = 50\nENERGY_PER_TICK_AWAKE = -1\nENERGY_PER_TICK_ASLEEP = 5\nMAX_ENERGY_LEVEL = 100\n\nINITIAL_IS_AWAKE = False\n\nINITIAL_POOP_LEVEL = 0\nTICKS_PER_POOP = 25\nMAX_POOP_LEVEL = 10\n\n\nclass UnknownCommand(Exception):\n pass\n\n\ndef _add_and_clip(x, dx, x_min, x_max):\n return max(x_min, min(x_max, x + dx))\n\n\nclass Tamagotchi:\n def __init__(self) -> None:\n self._age = 0\n self._food_level = INITIAL_FOOD_LEVEL\n self._energy_level = INITIAL_ENERGY_LEVEL\n self._poop_level = INITIAL_POOP_LEVEL\n self._is_awake = INITIAL_IS_AWAKE\n self._commands = {\n \"f\": self._feed,\n \"c\": self._clean,\n \"s\": self._sleep,\n }\n\n def __repr__(self) -> str:\n return f\"Tamagotchi(is_awake={self._is_awake}, food_level={self._food_level}, energy_level={self._energy_level}, poop_level={self._poop_level}, age={self._age})\"\n\n def process_command(self, command: str) -> None:\n try:\n self._commands[command]()\n except KeyError:\n raise UnknownCommand(command)\n\n def _feed(self) -> None:\n if self._is_awake:\n self._food_level = _add_and_clip(\n self._food_level, FOOD_PER_FEED, 0, MAX_FOOD_LEVEL\n )\n\n def _clean(self) -> None:\n self._poop_level = 0\n\n def _sleep(self) -> None:\n self._is_awake = False\n\n def is_alive(self) -> bool:\n return self._food_level > 0 and self._poop_level < MAX_POOP_LEVEL\n\n def update(self) -> None:\n self._age += 1\n # Food\n self._food_level = _add_and_clip(\n self._food_level, FOOD_PER_TICK, 0, MAX_FOOD_LEVEL\n )\n # Energy\n if self._energy_level >= MAX_ENERGY_LEVEL:\n self._is_awake = True\n if self._energy_level <= 0:\n self._is_awake = False\n energy_delta = (\n ENERGY_PER_TICK_AWAKE if self._is_awake else ENERGY_PER_TICK_ASLEEP\n )\n self._energy_level = _add_and_clip(\n self._energy_level, energy_delta, 0, 
MAX_ENERGY_LEVEL\n )\n # Poop\n if self._age % TICKS_PER_POOP == 0:\n self._poop_level += 1\n\n\ndef main():\n tamagotchi = Tamagotchi()\n with NonBlockingKeyboard() as kb:\n while True:\n inpt = kb.getstr()\n\n should_quit = False\n for c in inpt:\n try:\n tamagotchi.process_command(c)\n except UnknownCommand:\n if c == \"q\":\n should_quit = True\n break\n else:\n raise\n\n if should_quit:\n break\n\n tamagotchi.update()\n print(tamagotchi)\n if not tamagotchi.is_alive():\n print(\"tamagotchi died\")\n break\n time.sleep(TICK_DURATION)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
<|reserved_special_token_0|>
def _send(body, subject):
msg = MIMEMultipart()
msg['From'] = FROM
msg['To'] = TO
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(host=HOST, port=int(PORT))
server.starttls()
server.login(FROM, PASSWORD)
senders = server.sendmail(FROM, TO, msg.as_string())
server.quit()
return senders
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_email(body, subject=
f"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]"
):
"""
Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'
"""
thread = Thread(target=_send, args=(body, subject))
thread.start()
def _send(body, subject):
msg = MIMEMultipart()
msg['From'] = FROM
msg['To'] = TO
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(host=HOST, port=int(PORT))
server.starttls()
server.login(FROM, PASSWORD)
senders = server.sendmail(FROM, TO, msg.as_string())
server.quit()
return senders
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FROM = os.getenv('EMAIL_FROM')
TO = os.getenv('EMAIL_TO')
HOST = os.getenv('EMAIL_HOST')
PORT = os.getenv('EMAIL_PORT')
PASSWORD = os.getenv('EMAIL_PASSWORD')
def send_email(body, subject=
f"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]"
):
"""
Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'
"""
thread = Thread(target=_send, args=(body, subject))
thread.start()
def _send(body, subject):
msg = MIMEMultipart()
msg['From'] = FROM
msg['To'] = TO
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(host=HOST, port=int(PORT))
server.starttls()
server.login(FROM, PASSWORD)
senders = server.sendmail(FROM, TO, msg.as_string())
server.quit()
return senders
<|reserved_special_token_1|>
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
from threading import Thread
FROM = os.getenv('EMAIL_FROM')
TO = os.getenv('EMAIL_TO')
HOST = os.getenv('EMAIL_HOST')
PORT = os.getenv('EMAIL_PORT')
PASSWORD = os.getenv('EMAIL_PASSWORD')
def send_email(body, subject=
f"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]"
):
"""
Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'
"""
thread = Thread(target=_send, args=(body, subject))
thread.start()
def _send(body, subject):
msg = MIMEMultipart()
msg['From'] = FROM
msg['To'] = TO
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP(host=HOST, port=int(PORT))
server.starttls()
server.login(FROM, PASSWORD)
senders = server.sendmail(FROM, TO, msg.as_string())
server.quit()
return senders
<|reserved_special_token_1|>
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
from threading import Thread
FROM = os.getenv('EMAIL_FROM')
TO = os.getenv('EMAIL_TO')
HOST = os.getenv('EMAIL_HOST')
PORT = os.getenv('EMAIL_PORT')
PASSWORD = os.getenv('EMAIL_PASSWORD')
def send_email(body, subject=None):
    """Send an email asynchronously on a background thread.

    Args:
        body: Plain-text message body.
        subject: Optional subject line. Defaults to
            'ERROR LOG [Jan 01, 1970 - 12:00 AM]' using the current time.

    BUG FIX: the previous default evaluated ``datetime.now()`` once at import
    time (Python evaluates default arguments at function definition), so every
    email reused the module-load timestamp. Computing the subject per call
    gives each email the actual current time; passing an explicit subject
    behaves exactly as before.
    """
    if subject is None:
        subject = f'ERROR LOG [{datetime.strftime(datetime.now(), "%b %d, %Y - %I:%M %p")}]'

    # Send the email on a separate thread so the server doesn't
    # have to wait for it to finish
    thread = Thread(target=_send, args=(body, subject))
    thread.start()
def _send(body, subject):
    """Build a plain-text MIME message and deliver it over SMTP with STARTTLS.

    Returns the dict of refused recipients from ``smtplib.SMTP.sendmail``
    (empty when every recipient was accepted).
    """
    msg = MIMEMultipart()
    msg['From'] = FROM
    msg['To'] = TO
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain'))

    server = smtplib.SMTP(host=HOST, port=int(PORT))
    try:
        server.starttls()
        server.login(FROM, PASSWORD)
        senders = server.sendmail(FROM, TO, msg.as_string())
    finally:
        # Always close the connection: previously a failure in starttls/login/
        # sendmail skipped quit() and leaked the SMTP socket.
        server.quit()

    return senders
|
flexible
|
{
"blob_id": "60c3f6775d5112ff178bd3774c776819573887bb",
"index": 9367,
"step-1": "<mask token>\n\n\ndef _send(body, subject):\n msg = MIMEMultipart()\n msg['From'] = FROM\n msg['To'] = TO\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP(host=HOST, port=int(PORT))\n server.starttls()\n server.login(FROM, PASSWORD)\n senders = server.sendmail(FROM, TO, msg.as_string())\n server.quit()\n return senders\n",
"step-2": "<mask token>\n\n\ndef send_email(body, subject=\n f\"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]\"\n ):\n \"\"\"\n Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'\n \"\"\"\n thread = Thread(target=_send, args=(body, subject))\n thread.start()\n\n\ndef _send(body, subject):\n msg = MIMEMultipart()\n msg['From'] = FROM\n msg['To'] = TO\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP(host=HOST, port=int(PORT))\n server.starttls()\n server.login(FROM, PASSWORD)\n senders = server.sendmail(FROM, TO, msg.as_string())\n server.quit()\n return senders\n",
"step-3": "<mask token>\nFROM = os.getenv('EMAIL_FROM')\nTO = os.getenv('EMAIL_TO')\nHOST = os.getenv('EMAIL_HOST')\nPORT = os.getenv('EMAIL_PORT')\nPASSWORD = os.getenv('EMAIL_PASSWORD')\n\n\ndef send_email(body, subject=\n f\"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]\"\n ):\n \"\"\"\n Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'\n \"\"\"\n thread = Thread(target=_send, args=(body, subject))\n thread.start()\n\n\ndef _send(body, subject):\n msg = MIMEMultipart()\n msg['From'] = FROM\n msg['To'] = TO\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP(host=HOST, port=int(PORT))\n server.starttls()\n server.login(FROM, PASSWORD)\n senders = server.sendmail(FROM, TO, msg.as_string())\n server.quit()\n return senders\n",
"step-4": "import smtplib\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom datetime import datetime\nfrom threading import Thread\nFROM = os.getenv('EMAIL_FROM')\nTO = os.getenv('EMAIL_TO')\nHOST = os.getenv('EMAIL_HOST')\nPORT = os.getenv('EMAIL_PORT')\nPASSWORD = os.getenv('EMAIL_PASSWORD')\n\n\ndef send_email(body, subject=\n f\"ERROR LOG [{datetime.strftime(datetime.now(), '%b %d, %Y - %I:%M %p')}]\"\n ):\n \"\"\"\n Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'\n \"\"\"\n thread = Thread(target=_send, args=(body, subject))\n thread.start()\n\n\ndef _send(body, subject):\n msg = MIMEMultipart()\n msg['From'] = FROM\n msg['To'] = TO\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n server = smtplib.SMTP(host=HOST, port=int(PORT))\n server.starttls()\n server.login(FROM, PASSWORD)\n senders = server.sendmail(FROM, TO, msg.as_string())\n server.quit()\n return senders\n",
"step-5": "import smtplib\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom datetime import datetime\nfrom threading import Thread\n\nFROM = os.getenv('EMAIL_FROM')\nTO = os.getenv('EMAIL_TO')\nHOST = os.getenv('EMAIL_HOST')\nPORT = os.getenv('EMAIL_PORT')\nPASSWORD = os.getenv('EMAIL_PASSWORD')\n\n\ndef send_email(body, subject=f'ERROR LOG [{datetime.strftime(datetime.now(), \"%b %d, %Y - %I:%M %p\")}]'):\n \"\"\"\n Sends an email with the subject formatted as 'ERROR LOG [Jan 01, 1970 - 12:00 AM]'\n \"\"\"\n\n # Send the email on a separate thread so the server doesn't\n # have to wait for it to finish\n thread = Thread(target=_send, args=(body, subject))\n thread.start()\n\n\ndef _send(body, subject):\n msg = MIMEMultipart()\n msg['From'] = FROM\n msg['To'] = TO\n msg['Subject'] = subject\n msg.attach(MIMEText(body, 'plain'))\n\n server = smtplib.SMTP(host=HOST, port=int(PORT))\n server.starttls()\n server.login(FROM, PASSWORD)\n\n senders = server.sendmail(FROM, TO, msg.as_string())\n\n server.quit()\n\n return senders\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.http.response import HttpResponse
from django.shortcuts import render , HttpResponse
import requests
from django.conf import settings
from .forms import WeatherForm
# Create your views here.
def get_weather(request):
    """Django view: look up the met.no locationforecast for given coordinates.

    GET renders an empty form; POST validates latitude/longitude and queries
    the upstream API, passing the parsed JSON (or an error string) to the
    template. Network failures are reported via the template's ``error``
    field instead of bubbling up as a 500.
    """
    form = WeatherForm()
    error = ""
    output = {}
    if request.method == 'POST':
        form = WeatherForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            latitude = data['latitude']
            longitude = data['longitude']
            url = settings.WEATHER_URL
            url += "weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s" % (latitude, longitude)
            # api.met.no rejects requests without an identifying User-Agent.
            headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64;"}
            try:
                # Bound the request so a slow upstream can't hang the worker
                # (previously there was no timeout at all).
                response = requests.get(url, headers=headers, timeout=10)
            except requests.RequestException as exc:
                error = str(exc)
            else:
                if response.status_code == 200:
                    output = response.json()
                else:
                    error = response.text
    return render(request=request, template_name="core/weather.html",
                  context={'form': form, 'error': error, "output": output})
|
normal
|
{
"blob_id": "be5a683309317f1f6ebc20ad3511fd2b2510e806",
"index": 5535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_weather(request):\n form = WeatherForm()\n error = ''\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (\n latitude, longitude)\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request, template_name='core/weather.html',\n context={'form': form, 'error': error, 'output': output})\n",
"step-3": "from django.http.response import HttpResponse\nfrom django.shortcuts import render, HttpResponse\nimport requests\nfrom django.conf import settings\nfrom .forms import WeatherForm\n\n\ndef get_weather(request):\n form = WeatherForm()\n error = ''\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (\n latitude, longitude)\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request, template_name='core/weather.html',\n context={'form': form, 'error': error, 'output': output})\n",
"step-4": "from django.http.response import HttpResponse\nfrom django.shortcuts import render , HttpResponse\nimport requests\nfrom django.conf import settings\nfrom .forms import WeatherForm\n# Create your views here.\n\ndef get_weather(request):\n form = WeatherForm()\n error = \"\"\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += \"weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s\"%(latitude,longitude)\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64;\"}\n response = requests.get(url,headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request,template_name=\"core/weather.html\", context= {'form':form ,\n 'error':error , \"output\":output})",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .variational_legacy import *
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .variational_legacy import *
|
flexible
|
{
"blob_id": "ea07cb640e76ced8be92b55ee14e1d3058e073c9",
"index": 845,
"step-1": "<mask token>\n",
"step-2": "from .variational_legacy import *\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .variational_legacy import *\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
githubName = request.form.get('githubname')
responseUser = requests.get('{}{}'.format(base_url, githubName))
responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))
userInfo = responseUser.json()
userRepos = responseRepos.json()
if 'message' in userInfo:
return render_template('index.html', error='Kullanıcı Bulunamadı')
return render_template('index.html', profile=userInfo, repos=userRepos)
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
base_url = 'https://api.github.com/users/'
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
githubName = request.form.get('githubname')
responseUser = requests.get('{}{}'.format(base_url, githubName))
responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))
userInfo = responseUser.json()
userRepos = responseRepos.json()
if 'message' in userInfo:
return render_template('index.html', error='Kullanıcı Bulunamadı')
return render_template('index.html', profile=userInfo, repos=userRepos)
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, render_template, request
import requests
app = Flask(__name__)
base_url = 'https://api.github.com/users/'
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
githubName = request.form.get('githubname')
responseUser = requests.get('{}{}'.format(base_url, githubName))
responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))
userInfo = responseUser.json()
userRepos = responseRepos.json()
if 'message' in userInfo:
return render_template('index.html', error='Kullanıcı Bulunamadı')
return render_template('index.html', profile=userInfo, repos=userRepos)
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import (
Flask,
render_template,
request
)
import requests
app = Flask(__name__)
base_url = "https://api.github.com/users/"
@app.route("/", methods = ["GET", "POST"])
def index():
if request.method == "POST":
githubName = request.form.get("githubname")
responseUser = requests.get("{}{}".format(base_url, githubName))
responseRepos = requests.get("{}{}/repos".format(base_url, githubName))
userInfo = responseUser.json()
userRepos = responseRepos.json()
if "message" in userInfo:
return render_template("index.html", error = "Kullanıcı Bulunamadı")
return render_template("index.html", profile = userInfo , repos = userRepos)
return render_template("index.html")
if __name__ == "__main__":
app.run(debug = True)
|
flexible
|
{
"blob_id": "62094d036596f39e7cf936fe7a91e67d53ee055e",
"index": 9557,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n githubName = request.form.get('githubname')\n responseUser = requests.get('{}{}'.format(base_url, githubName))\n responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))\n userInfo = responseUser.json()\n userRepos = responseRepos.json()\n if 'message' in userInfo:\n return render_template('index.html', error='Kullanıcı Bulunamadı')\n return render_template('index.html', profile=userInfo, repos=userRepos)\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nbase_url = 'https://api.github.com/users/'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n githubName = request.form.get('githubname')\n responseUser = requests.get('{}{}'.format(base_url, githubName))\n responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))\n userInfo = responseUser.json()\n userRepos = responseRepos.json()\n if 'message' in userInfo:\n return render_template('index.html', error='Kullanıcı Bulunamadı')\n return render_template('index.html', profile=userInfo, repos=userRepos)\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request\nimport requests\napp = Flask(__name__)\nbase_url = 'https://api.github.com/users/'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n githubName = request.form.get('githubname')\n responseUser = requests.get('{}{}'.format(base_url, githubName))\n responseRepos = requests.get('{}{}/repos'.format(base_url, githubName))\n userInfo = responseUser.json()\n userRepos = responseRepos.json()\n if 'message' in userInfo:\n return render_template('index.html', error='Kullanıcı Bulunamadı')\n return render_template('index.html', profile=userInfo, repos=userRepos)\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import (\n Flask,\n render_template,\n request\n)\nimport requests\n\napp = Flask(__name__)\nbase_url = \"https://api.github.com/users/\"\n\n@app.route(\"/\", methods = [\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n githubName = request.form.get(\"githubname\")\n responseUser = requests.get(\"{}{}\".format(base_url, githubName))\n responseRepos = requests.get(\"{}{}/repos\".format(base_url, githubName))\n\n userInfo = responseUser.json()\n userRepos = responseRepos.json()\n\n if \"message\" in userInfo:\n return render_template(\"index.html\", error = \"Kullanıcı Bulunamadı\")\n return render_template(\"index.html\", profile = userInfo , repos = userRepos)\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug = True)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class predict(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data, self.train_labels)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testLightGBM(self):
self.predicted_labels = lgb.predict(self.val_data)
print('LightGBM score ' + str(rmse(self.predicted_labels, self.
val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data, self.train_labels, eval_metric=
'rmse', eval_set=[(self.train_data, self.train_labels), (self.
x_train_val, self.y_train_val)], verbose=True,
early_stopping_rounds=10)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class predict(object):
def __init__(self, trainfile, testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',
'min_data_in_leaf': 16, 'bagging_fraction': 0.85,
'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **
7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=
38, random_state=50)
self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,
min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=
0.15, seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def trainLinearRegression(self):
self.__lr.fit(self.train_data, self.train_labels)
<|reserved_special_token_0|>
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data, self.train_labels)
<|reserved_special_token_0|>
def trainLightGBM(self):
lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=
train_labels), 300)
def testLightGBM(self):
self.predicted_labels = lgb.predict(self.val_data)
print('LightGBM score ' + str(rmse(self.predicted_labels, self.
val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data, self.train_labels, eval_metric=
'rmse', eval_set=[(self.train_data, self.train_labels), (self.
x_train_val, self.y_train_val)], verbose=True,
early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print('XGBoost score ' + str(rmse(self.predicted_labels, self.
val_labels)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
<|reserved_special_token_0|>
class predict(object):
def __init__(self, trainfile, testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',
'min_data_in_leaf': 16, 'bagging_fraction': 0.85,
'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **
7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=
38, random_state=50)
self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,
min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=
0.15, seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=
parser)
df = df.dropna()
df = df.loc[df['item_cnt_day'] > 0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',
'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.
date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',
'item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
(self.train_data, self.val_data, self.train_labels, self.val_labels
) = (train_test_split(self.train_data1, self.train_labels1,
test_size=0.3))
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=
parser)
subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
drop_duplicate(df, sub_set=subset_test)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]
df['item_id'] = np.log1p(df['item_id'])
self.test_data = df
def data(self):
self.trainingdata()
self.testingdata()
def trainLinearRegression(self):
self.__lr.fit(self.train_data, self.train_labels)
def testLinearRegression(self):
self.predicted_labels = self.__lr.predict(self.val_data)
print('Linear Regression score ' + str(rmse(self.predicted_labels,
self.val_labels)))
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data, self.train_labels)
def testExtraTreeRegressor(self):
self.predicted_labels = self.__tree_reg.predict(self.val_data)
print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,
self.val_labels)))
def trainLightGBM(self):
lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=
train_labels), 300)
def testLightGBM(self):
self.predicted_labels = lgb.predict(self.val_data)
print('LightGBM score ' + str(rmse(self.predicted_labels, self.
val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data, self.train_labels, eval_metric=
'rmse', eval_set=[(self.train_data, self.train_labels), (self.
x_train_val, self.y_train_val)], verbose=True,
early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print('XGBoost score ' + str(rmse(self.predicted_labels, self.
val_labels)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
class predict(object):
def __init__(self, trainfile, testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',
'min_data_in_leaf': 16, 'bagging_fraction': 0.85,
'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **
7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=
38, random_state=50)
self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,
min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=
0.15, seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=
parser)
df = df.dropna()
df = df.loc[df['item_cnt_day'] > 0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',
'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.
date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',
'item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
(self.train_data, self.val_data, self.train_labels, self.val_labels
) = (train_test_split(self.train_data1, self.train_labels1,
test_size=0.3))
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=
parser)
subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
drop_duplicate(df, sub_set=subset_test)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]
df['item_id'] = np.log1p(df['item_id'])
self.test_data = df
def data(self):
self.trainingdata()
self.testingdata()
def trainLinearRegression(self):
self.__lr.fit(self.train_data, self.train_labels)
def testLinearRegression(self):
self.predicted_labels = self.__lr.predict(self.val_data)
print('Linear Regression score ' + str(rmse(self.predicted_labels,
self.val_labels)))
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data, self.train_labels)
def testExtraTreeRegressor(self):
self.predicted_labels = self.__tree_reg.predict(self.val_data)
print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,
self.val_labels)))
def trainLightGBM(self):
lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=
train_labels), 300)
def testLightGBM(self):
self.predicted_labels = lgb.predict(self.val_data)
print('LightGBM score ' + str(rmse(self.predicted_labels, self.
val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data, self.train_labels, eval_metric=
'rmse', eval_set=[(self.train_data, self.train_labels), (self.
x_train_val, self.y_train_val)], verbose=True,
early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print('XGBoost score ' + str(rmse(self.predicted_labels, self.
val_labels)))
if __name__ == '__main__':
train_data_name = sys.argv[1]
test_data_name = sys.argv[2]
model = predict(train_data_name, test_data_name)
model.data()
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
import os
from datetime import datetime
from sklearn.linear_model import LinearRegression, SGDRegressor
import sys
import time
import imp
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor, plot_importance
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def drop_duplicate(data, sub_set):
print('Before drop shape:', data.shape)
before = data.shape[0]
data.drop_duplicates(sub_set, keep='first', inplace=True)
data.reset_index(drop=True, inplace=True)
print('After drop shape:', data.shape)
after = data.shape[0]
print('Total Duplicate:', before - after)
def rmse(predictions, targets):
return np.sqrt(np.mean((predictions - targets) ** 2))
class predict(object):
def __init__(self,trainfile,testfile):
self.trainfile = trainfile
self.testfile = testfile
self.__lr = LinearRegression()
# self.__dtree = DecisionTreeClassifier()
# self.__rforest = RandomForestClassifier()
# self.__svm = SVC(kernel='rbf')
self.lgb_params = {
'feature_fraction': 1,
'metric': 'rmse',
'min_data_in_leaf': 16,
'bagging_fraction': 0.85,
'learning_rate': 0.03,
'objective': 'mse',
'bagging_seed': 2 ** 7,
'num_leaves': 32,
'bagging_freq': 3,
'verbose': 0
}
self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)
self._xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,eta=0.15,seed=42)
self.train_data = None
self.train_labels = None
self.train_data1 = None
self.train_labels1 = None
self.val_data = None
self.val_labels = None
self.test_data = None
self.predicted_labels = None
self.x_train_val = None
self.y_train_val = None
def trainingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)
df = df.dropna()
df = df.loc[df['item_cnt_day']>0]
subset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']
drop_duplicate(df, sub_set=subset_train)
median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()
df.loc[df.item_price < 0, 'item_price'] = median
df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)
df['item_price'] = df['item_price'].clip(0, 300000)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]
df['item_id'] = np.log1p(df['item_id'])
self.train_labels1 = df['item_cnt_day']
self.train_data1 = df.drop(columns='item_cnt_day')
self.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)
self.x_train_val = self.train_data[-100:]
self.y_train_val = self.train_labels[-100:]
def testingdata(self):
parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')
df = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)
subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']
drop_duplicate(df, sub_set=subset_test)
df.loc[df.shop_id == 0, 'shop_id'] = 57
df.loc[df.shop_id == 1, 'shop_id'] = 58
df.loc[df.shop_id == 10, 'shop_id'] = 11
df['day'] = df['date'].apply(lambda x: x.strftime('%d'))
df['day'] = df['day'].astype('int64')
df['month'] = df['date'].apply(lambda x: x.strftime('%m'))
df['month'] = df['month'].astype('int64')
df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))
df['year'] = df['year'].astype('int64')
df = df[['day','month','year','item_id', 'shop_id','item_price']]
df['item_id'] = np.log1p(df['item_id'])
self.test_data = df;
def data(self):
self.trainingdata()
self.testingdata()
def trainLinearRegression(self):
self.__lr.fit(self.train_data,self.train_labels)
def testLinearRegression(self):
self.predicted_labels = self.__lr.predict(self.val_data)
# print ("Linear Regression score " + str(self.__lr.score(self.val_data, self.val_labels)))
print ("Linear Regression score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainExtraTreeRegressor(self):
self.__tree_reg.fit(self.train_data,self.train_labels)
def testExtraTreeRegressor(self):
self.predicted_labels = self.__tree_reg.predict(self.val_data)
print ("ExtraTreeRegressor score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainLightGBM(self):
lgb.train(self.lgb_params,lgb.dataset(self.train_data,label=train_labels),300)
def testLightGBM(self):
self.predicted_labels = lgb.predict(self.val_data)
print ("LightGBM score " + str(rmse(self.predicted_labels,self.val_labels)))
def trainXGBoost(self):
self.__xgb.fit(self.train_data,self.train_labels,eval_metric="rmse",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)
def testXGBoost(self):
self.predicted_labels = self.__xgb.predict(self.val_data)
print ("XGBoost score " + str(rmse(self.predicted_labels,self.val_labels)))
if __name__ == "__main__":
train_data_name = sys.argv[1]
test_data_name = sys.argv[2]
model = predict(train_data_name,test_data_name)
model.data()
# model.trainLinearRegression()
# model.testLinearRegression()
# model.trainExtraTreeRegressor()
# model.testExtraTreeRegressor()
# model.trainLightGBM()
# model.testLightGBM()
# model.trainXGBoost()
# model.testXGBoost()
# plotConfusionMatrix(model.test_labels,model.predicted_labels)
# model.trainDecesionTree()
# model.testDecesionTree()
# model.trainRandomForrest()
# model.testRandomForrest()
# model.trainSVM()
# model.testSVM()
|
flexible
|
{
"blob_id": "ee49ce63951721458cb98b370285d04231bb2c20",
"index": 7438,
"step-1": "<mask token>\n\n\nclass predict(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n <mask token>\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n <mask token>\n <mask token>\n <mask token>\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n <mask token>\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\n<mask token>\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n 
df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n 
self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\n<mask token>\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n def __init__(self, trainfile, testfile):\n self.trainfile = trainfile\n self.testfile = testfile\n self.__lr = LinearRegression()\n self.lgb_params = {'feature_fraction': 1, 'metric': 'rmse',\n 'min_data_in_leaf': 16, 'bagging_fraction': 0.85,\n 'learning_rate': 0.03, 'objective': 'mse', 'bagging_seed': 2 **\n 7, 'num_leaves': 32, 'bagging_freq': 3, 'verbose': 0}\n self.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=\n 38, random_state=50)\n self._xgb = XGBRegressor(max_depth=8, n_estimators=1000,\n min_child_weight=300, colsample_bytree=0.9, subsample=0.9, eta=\n 0.15, seed=42)\n self.train_data = None\n self.train_labels = None\n self.train_data1 = None\n self.train_labels1 = None\n self.val_data = None\n self.val_labels = None\n self.test_data = None\n self.predicted_labels = None\n self.x_train_val = None\n self.y_train_val = None\n\n def trainingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.trainfile, parse_dates=['date'], date_parser=\n parser)\n 
df = df.dropna()\n df = df.loc[df['item_cnt_day'] > 0]\n subset_train = ['date', 'date_block_num', 'shop_id', 'item_id',\n 'item_cnt_day']\n drop_duplicate(df, sub_set=subset_train)\n median = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.\n date_block_num == 4) & (df.item_price > 0)].item_price.median()\n df.loc[df.item_price < 0, 'item_price'] = median\n df['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n df['item_price'] = df['item_price'].clip(0, 300000)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price',\n 'item_cnt_day']]\n df['item_id'] = np.log1p(df['item_id'])\n self.train_labels1 = df['item_cnt_day']\n self.train_data1 = df.drop(columns='item_cnt_day')\n (self.train_data, self.val_data, self.train_labels, self.val_labels\n ) = (train_test_split(self.train_data1, self.train_labels1,\n test_size=0.3))\n self.x_train_val = self.train_data[-100:]\n self.y_train_val = self.train_labels[-100:]\n\n def testingdata(self):\n parser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n df = pd.read_csv(self.testfile, parse_dates=['date'], date_parser=\n parser)\n subset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n drop_duplicate(df, sub_set=subset_test)\n df.loc[df.shop_id == 0, 'shop_id'] = 57\n df.loc[df.shop_id == 1, 'shop_id'] = 58\n df.loc[df.shop_id == 10, 'shop_id'] = 11\n df['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n df['day'] = df['day'].astype('int64')\n df['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n df['month'] = df['month'].astype('int64')\n df['year'] = 
df['date'].apply(lambda x: x.strftime('%Y'))\n df['year'] = df['year'].astype('int64')\n df = df[['day', 'month', 'year', 'item_id', 'shop_id', 'item_price']]\n df['item_id'] = np.log1p(df['item_id'])\n self.test_data = df\n\n def data(self):\n self.trainingdata()\n self.testingdata()\n\n def trainLinearRegression(self):\n self.__lr.fit(self.train_data, self.train_labels)\n\n def testLinearRegression(self):\n self.predicted_labels = self.__lr.predict(self.val_data)\n print('Linear Regression score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainExtraTreeRegressor(self):\n self.__tree_reg.fit(self.train_data, self.train_labels)\n\n def testExtraTreeRegressor(self):\n self.predicted_labels = self.__tree_reg.predict(self.val_data)\n print('ExtraTreeRegressor score ' + str(rmse(self.predicted_labels,\n self.val_labels)))\n\n def trainLightGBM(self):\n lgb.train(self.lgb_params, lgb.dataset(self.train_data, label=\n train_labels), 300)\n\n def testLightGBM(self):\n self.predicted_labels = lgb.predict(self.val_data)\n print('LightGBM score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n def trainXGBoost(self):\n self.__xgb.fit(self.train_data, self.train_labels, eval_metric=\n 'rmse', eval_set=[(self.train_data, self.train_labels), (self.\n x_train_val, self.y_train_val)], verbose=True,\n early_stopping_rounds=10)\n\n def testXGBoost(self):\n self.predicted_labels = self.__xgb.predict(self.val_data)\n print('XGBoost score ' + str(rmse(self.predicted_labels, self.\n val_labels)))\n\n\nif __name__ == '__main__':\n train_data_name = sys.argv[1]\n test_data_name = sys.argv[2]\n model = predict(train_data_name, test_data_name)\n model.data()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport numpy.random as nr\nimport math\nimport os\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression, SGDRegressor\nimport sys\nimport time\nimport imp\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom xgboost import XGBRegressor, plot_importance\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\n\n\n\n\n\ndef drop_duplicate(data, sub_set):\n print('Before drop shape:', data.shape)\n before = data.shape[0]\n data.drop_duplicates(sub_set, keep='first', inplace=True)\n data.reset_index(drop=True, inplace=True)\n print('After drop shape:', data.shape)\n after = data.shape[0]\n print('Total Duplicate:', before - after)\n\ndef rmse(predictions, targets):\n return np.sqrt(np.mean((predictions - targets) ** 2))\n\n\nclass predict(object):\n\n\tdef __init__(self,trainfile,testfile):\n\t\tself.trainfile = trainfile\n\t\tself.testfile = testfile\n\t\tself.__lr = LinearRegression()\n\t\t# self.__dtree = DecisionTreeClassifier()\n\t\t# self.__rforest = RandomForestClassifier()\n\t\t# self.__svm = SVC(kernel='rbf')\n\t\tself.lgb_params = {\n 'feature_fraction': 1,\n 'metric': 'rmse',\n 'min_data_in_leaf': 16,\n 'bagging_fraction': 0.85,\n 'learning_rate': 0.03,\n 'objective': 'mse',\n 'bagging_seed': 2 ** 7,\n 'num_leaves': 32,\n 'bagging_freq': 3,\n 'verbose': 0\n \t}\n\t\tself.__tree_reg = ExtraTreesRegressor(n_estimators=600, max_depth=38,random_state=50)\n\t\tself._xgb = XGBRegressor(max_depth=8,n_estimators=1000,min_child_weight=300,colsample_bytree=0.9,subsample=0.9,eta=0.15,seed=42)\n\t\tself.train_data = None\n\t\tself.train_labels = None\n\t\tself.train_data1 = None\n\t\tself.train_labels1 = None\n\t\tself.val_data = None\n\t\tself.val_labels = None\n\t\tself.test_data = None\n\t\tself.predicted_labels = None\n\t\tself.x_train_val = 
None\n\t\tself.y_train_val = None\n\n\tdef trainingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.trainfile,parse_dates=['date'],date_parser=parser)\n\t\tdf = df.dropna()\n\t\tdf = df.loc[df['item_cnt_day']>0]\n\t\tsubset_train = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_cnt_day']\n\t\tdrop_duplicate(df, sub_set=subset_train)\n\t\tmedian = df[(df.shop_id == 32) & (df.item_id == 2973) & (df.date_block_num == 4) & (df.item_price > 0)].item_price.median()\n\t\tdf.loc[df.item_price < 0, 'item_price'] = median\n\t\tdf['item_cnt_day'] = df['item_cnt_day'].clip(0, 1000)\n\t\tdf['item_price'] = df['item_price'].clip(0, 300000)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] = 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price','item_cnt_day']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.train_labels1 = df['item_cnt_day']\n\t\tself.train_data1 = df.drop(columns='item_cnt_day')\n\t\tself.train_data,self.val_data,self.train_labels,self.val_labels=train_test_split(self.train_data1,self.train_labels1,test_size=0.3)\n\t\tself.x_train_val = self.train_data[-100:]\n\t\tself.y_train_val = self.train_labels[-100:]\n\n\n\tdef testingdata(self):\n\t\tparser = lambda date: pd.to_datetime(date, format='%d.%m.%Y')\n\t\tdf = pd.read_csv(self.testfile,parse_dates=['date'],date_parser=parser)\n\t\tsubset_test = ['date', 'date_block_num', 'shop_id', 'item_id']\n\t\tdrop_duplicate(df, sub_set=subset_test)\n\t\tdf.loc[df.shop_id == 0, 'shop_id'] = 57\n\t\tdf.loc[df.shop_id == 1, 'shop_id'] 
= 58\n\t\tdf.loc[df.shop_id == 10, 'shop_id'] = 11\n\t\tdf['day'] = df['date'].apply(lambda x: x.strftime('%d'))\n\t\tdf['day'] = df['day'].astype('int64')\n\t\tdf['month'] = df['date'].apply(lambda x: x.strftime('%m'))\n\t\tdf['month'] = df['month'].astype('int64')\n\t\tdf['year'] = df['date'].apply(lambda x: x.strftime('%Y'))\n\t\tdf['year'] = df['year'].astype('int64')\n\t\tdf = df[['day','month','year','item_id', 'shop_id','item_price']]\n\t\tdf['item_id'] = np.log1p(df['item_id'])\n\t\tself.test_data = df;\n\n\tdef data(self):\n\t\tself.trainingdata()\n\t\tself.testingdata()\n\n\tdef trainLinearRegression(self):\n\t\tself.__lr.fit(self.train_data,self.train_labels)\n\n\tdef testLinearRegression(self):\n\t\tself.predicted_labels = self.__lr.predict(self.val_data)\n\t\t# print (\"Linear Regression score \" + str(self.__lr.score(self.val_data, self.val_labels)))\n\t\tprint (\"Linear Regression score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainExtraTreeRegressor(self):\n\t\tself.__tree_reg.fit(self.train_data,self.train_labels)\n\n\tdef testExtraTreeRegressor(self):\n\t\tself.predicted_labels = self.__tree_reg.predict(self.val_data)\n\t\tprint (\"ExtraTreeRegressor score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainLightGBM(self):\n\t\tlgb.train(self.lgb_params,lgb.dataset(self.train_data,label=train_labels),300)\n\n\tdef testLightGBM(self):\n\t\tself.predicted_labels = lgb.predict(self.val_data)\n\t\tprint (\"LightGBM score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\tdef trainXGBoost(self):\n\t\tself.__xgb.fit(self.train_data,self.train_labels,eval_metric=\"rmse\",eval_set=[(self.train_data, self.train_labels), (self.x_train_val, self.y_train_val)],verbose=True,early_stopping_rounds=10)\n\n\tdef testXGBoost(self):\n\t\tself.predicted_labels = self.__xgb.predict(self.val_data)\n\t\tprint (\"XGBoost score \" + str(rmse(self.predicted_labels,self.val_labels)))\n\n\n\n\n\n\n\nif __name__ == 
\"__main__\":\n\ttrain_data_name = sys.argv[1]\n\ttest_data_name = sys.argv[2]\n\tmodel = predict(train_data_name,test_data_name)\n\tmodel.data()\n\t# model.trainLinearRegression()\n\t# model.testLinearRegression()\n\n\t# model.trainExtraTreeRegressor()\n\t# model.testExtraTreeRegressor()\n\n\t# model.trainLightGBM()\n\t# model.testLightGBM()\n\n\t# model.trainXGBoost()\n\t# model.testXGBoost()\n\n\n\t# plotConfusionMatrix(model.test_labels,model.predicted_labels)\n\t\n\t# model.trainDecesionTree()\n\t# model.testDecesionTree()\n\n\t# model.trainRandomForrest()\n\t# model.testRandomForrest()\n\n\t# model.trainSVM()\n\t# model.testSVM()\n\n\n\n\n\n",
"step-ids": [
4,
8,
14,
17,
18
]
}
|
[
4,
8,
14,
17,
18
] |
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import tornado.options
import serial
import time
from datetime import timedelta
import cv2
import time
from datetime import datetime
# --- Global hardware / state setup for the visitor-counter service ---
# Frame source: first attached webcam (device index 0).
#for webcam users
camera=cv2.VideoCapture(0)
#for picam users
#import picam
#camera=picam.OpenCVCapture()
#if you prefer to change the resolution of the image otherwise comment below 2 lines
ret = camera.set(3,320) #width  (property id 3 == CAP_PROP_FRAME_WIDTH)
ret = camera.set(4,240) #height (property id 4 == CAP_PROP_FRAME_HEIGHT)
#ret=camera.set(10,0.6)
# Haar cascade for frontal-face detection, loaded from the stock OpenCV path.
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
# Currently connected WebSocket clients; WSHandler.open/on_close maintain it,
# send_message_to_clients() broadcasts to it.
clients = []
# Append-only visitor log; function_second() appends one line per detection.
# NOTE(review): held open for the whole process and never flushed explicitly,
# so log lines may sit in the OS buffer until exit.
f=open("/home/pi/visitor_project/register.txt","a")
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint for the visitor counter.

    Tracks every open connection in the module-level ``clients`` list so
    ``send_message_to_clients()`` can broadcast detection updates, and
    answers a literal ``'who'`` message with the accumulated visitor log.
    """

    def check_origin(self, origin):
        # NOTE(security): returning True accepts cross-origin WebSocket
        # connections from any site; restrict to known hosts in production.
        return True

    def open(self):
        # Register this connection so broadcasts reach it.
        print('A Client Is Connected')
        clients.append(self)

    def on_message(self, message):
        # Only the 'who' query is understood; everything else is ignored.
        print('Incoming status ' + str(message))
        if message == 'who':
            # `with` guarantees the log file is closed even if reading
            # fails (the original leaked the handle on an exception).
            with open("/home/pi/visitor_project/register.txt", "r") as log:
                # A line of length 1 is just a bare newline — skip blanks.
                lines = [line for line in log if len(line) != 1]
            self.write_message(''.join(lines))

    def on_close(self):
        # Deregister so broadcasts stop writing to a dead socket.
        print('Client Closed the Connecttion ')
        clients.remove(self)
def send_message_to_clients(msg):
    """Broadcast *msg* to every WebSocket client currently registered
    in the module-level ``clients`` list."""
    for connection in clients:
        connection.write_message(msg)
def function_second():
    # Periodic task (~1 Hz): grab one frame, run Haar-cascade face
    # detection, push the count to WebSocket clients, save an annotated
    # snapshot, append to the visitor log, then re-arm itself on the IOLoop.
    ret, image=camera.read()
    # Cascade detection needs a single-channel image.
    # NOTE(review): OpenCV captures BGR, yet this converts as RGB; for a
    # grayscale conversion the channel-order difference is negligible.
# gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 4)
    faces = face_cascade.detectMultiScale(gray,
                                          scaleFactor=1.3,
                                          minNeighbors=3,
                                          minSize=(30,30),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    print "Found "+str(len(faces))+" face(s)"
    #Draw a rectangle around every found face
    for (x,y,w,h) in faces:
        cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
    if len(faces)>=1:
        # Notify connected browsers, persist the annotated frame for the
        # static-file handler, and append a timestamped log entry.
        send_message_to_clients(str(len(faces))+" Visitors")
        cv2.imwrite('/home/pi/visitor_project/result.jpg',image)
        gt=datetime.now().strftime('%Y-%m-%d- %H:%M:%S - ')
        m="log-"+gt+str(len(faces))+" Visitors"
        f.write("\n"+m)
    # Schedule the next run one second from now (self-rearming timer).
    tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),
    function_second)
if __name__ == "__main__":
    # Parse tornado's standard command-line flags (logging, etc.).
    tornado.options.parse_command_line()
    # Routes:
    #   /ws                  -> WSHandler (WebSocket push + 'who' queries)
    #   /visitor_project/*   -> static files (result.jpg snapshot, web UI)
    application=tornado.web.Application(handlers=[
        (r"/ws",WSHandler),
        (r'/visitor_project/(.*)',tornado.web.StaticFileHandler,{'path':'/home/pi/visitor_project'})
    ])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(3030)
    # Arm the 1-second face-detection timer, then block in the event loop.
    tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),
    function_second)
    tornado.ioloop.IOLoop.instance().start()
|
normal
|
{
"blob_id": "1e9afe6435285da6c6efb678177587d7ba5a01b2",
"index": 1397,
"step-1": "import tornado.httpserver\nimport tornado.websocket\nimport tornado.ioloop\nimport tornado.web\nimport tornado.options\nimport serial\nimport time\nfrom datetime import timedelta\nimport cv2\nimport time\nfrom datetime import datetime\n\n\n#for webcam users\ncamera=cv2.VideoCapture(0)\n\n#for picam users\n#import picam\n#camera=picam.OpenCVCapture()\n\n\n#if you prefer to change the resolution of the image otherwise comment below 2 lines\nret = camera.set(3,320) #width \nret = camera.set(4,240) #height\n\n#ret=camera.set(10,0.6)\n\nface_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')\n\nclients = []\nf=open(\"/home/pi/visitor_project/register.txt\",\"a\") \n \n\n\n \n \nclass WSHandler(tornado.websocket.WebSocketHandler):\n \n def check_origin(self, origin):\n return True\n \n \n def open(self):\n print 'A Client Is Connected'\n clients.append(self)\n \n\n def on_message(self, message):\n \n print 'Incoming status', message\n #a=message.split(\"!\")\n \n \n \n\n\n if message=='who':\n count=0\n list1=[]\n a=\"\"\n f=open(\"/home/pi/visitor_project/register.txt\",\"r\")\n for line in f.readlines():\n \n if len(line) != 1 :\n \n \n list1.append(line)\n \n \n \n \n \n #count=count+1\n \n \n f.close()\n a=''.join(map(str,list1))\n self.write_message(a)\n \n\n\n def on_close(self):\n print 'Client Closed the Connecttion '\n clients.remove(self)\n \ndef send_message_to_clients(msg):\n for client in clients:\n client.write_message(msg)\n\n\n \n \ndef function_second():\n \n \n \n ret, image=camera.read()\n \n \n # gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)\n\n\n # faces = face_cascade.detectMultiScale(gray, 1.3, 4)\n \n faces = face_cascade.detectMultiScale(gray, \n\t\t\t\tscaleFactor=1.3, \n\t\t\t\tminNeighbors=3, \n\t\t\t\tminSize=(30,30), \n\t\t\t\tflags=cv2.CASCADE_SCALE_IMAGE) \n print \"Found \"+str(len(faces))+\" face(s)\"\n \n#Draw a rectangle around 
every found face\n for (x,y,w,h) in faces:\n cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)\n \n \n\t\n if len(faces)>=1:\n send_message_to_clients(str(len(faces))+\" Visitors\")\n cv2.imwrite('/home/pi/visitor_project/result.jpg',image)\n gt=datetime.now().strftime('%Y-%m-%d- %H:%M:%S - ')\n m=\"log-\"+gt+str(len(faces))+\" Visitors\"\n \n f.write(\"\\n\"+m)\n \n tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),\n function_second) \n\nif __name__ == \"__main__\":\n \n tornado.options.parse_command_line()\n application=tornado.web.Application(handlers=[\n\n(r\"/ws\",WSHandler),\n\n\n(r'/visitor_project/(.*)',tornado.web.StaticFileHandler,{'path':'/home/pi/visitor_project'})\n\n])\n \n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(3030)\n tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),\n function_second)\n tornado.ioloop.IOLoop.instance().start()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Definition for a Node.
class Node:
    """N-ary tree node.

    ``children`` is a list of child Nodes, or None for a leaf; the
    traversal code treats None and an empty list the same way.  The
    default of None means leaf construction no longer has to pass it
    explicitly (backward compatible with two-argument calls).
    """

    def __init__(self, val, children=None):
        self.val = val            # payload stored at this node
        self.children = children  # list of child Nodes, or None
class Solution(object):
    def postorder(self, root):
        """
        :type root: Node
        :rtype: List[int]

        Post-order traversal of an N-ary tree: all children left to
        right, then the node itself.
        """
        out = []

        def visit(node):
            # Children first, then the node's own value (post-order).
            if node is None:
                return
            for child in (node.children or []):
                visit(child)
            out.append(node.val)

        visit(root)
        return out
# Build the sample tree:
#         1
#       / | \
#      3  2  4
#     / \
#    5   6
# NOTE: variable numbering does not match stored values — n2 holds 3
# and n3 holds 2.
n5 = Node(5,None)
n6 = Node(6,None)
n3 = Node(2,None)
n4 = Node(4,None)
n2 = Node(3,[n5,n6])
n1 = Node(1,[n2,n3,n4])

s = Solution()
result = s.postorder(n1)
# Expected post-order: [5, 6, 3, 2, 4, 1]
print(result)
|
normal
|
{
"blob_id": "93ec15a37bd5f022e8f6e226e3bf0e91cc0457c6",
"index": 2178,
"step-1": "class Node:\n <mask token>\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\n<mask token>\nprint(result)\n",
"step-4": "class Node:\n\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\n\nclass Solution(object):\n\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return []\n if not root.children:\n return [root.val]\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return result\n\n\nn5 = Node(5, None)\nn6 = Node(6, None)\nn3 = Node(2, None)\nn4 = Node(4, None)\nn2 = Node(3, [n5, n6])\nn1 = Node(1, [n2, n3, n4])\ns = Solution()\nresult = s.postorder(n1)\nprint(result)\n",
"step-5": "# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\nclass Solution(object):\n def postorder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[int]\n \"\"\"\n if not root:\n return([])\n if not root.children:\n return([root.val])\n result = []\n for child in root.children:\n result += self.postorder(child)\n result += [root.val]\n return(result)\n\n \nn5 = Node(5,None)\nn6 = Node(6,None)\nn3 = Node(2,None)\nn4 = Node(4,None)\nn2 = Node(3,[n5,n6])\nn1 = Node(1,[n2,n3,n4])\n\ns = Solution()\nresult = s.postorder(n1)\nprint(result)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.