code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fairCandySwap(A, B):
    """Return (a, b): Alice trades her bar of size a for Bob's bar of size b
    so both end up with equal candy totals."""
    # Alice must shed exactly half the difference in totals.
    half_gap = (sum(A) - sum(B)) // 2
    alice_sizes = set(A)
    for bob_bar in set(B):
        if bob_bar + half_gap in alice_sizes:
            return bob_bar + half_gap, bob_bar
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fairCandySwap(A, B):
    """Find one (alice_bar, bob_bar) pair whose exchange equalizes both totals."""
    diff = (sum(A) - sum(B)) // 2
    alice_owns = set(A)
    # First Bob bar whose matching Alice bar (bob + diff) exists wins.
    return next(((b + diff, b) for b in set(B) if b + diff in alice_owns), None)


print(fairCandySwap(A=[1, 1], B=[2, 2]))
print(fairCandySwap(A=[1, 2], B=[2, 3]))
print(fairCandySwap(A=[2], B=[1, 3]))
print(fairCandySwap(A=[1, 2, 5], B=[2, 4]))
<|reserved_special_token_1|>
"""
爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。
因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)
返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。
如果有多个答案,你可以返回其中任何一个。保证答案存在。
"""
def fairCandySwap(A, B):
    """Return a pair (alice_size, bob_size) to swap so both totals match."""
    # How much larger Alice's traded bar must be than Bob's.
    shift = (sum(A) - sum(B)) // 2
    sizes_a, sizes_b = set(A), set(B)
    for bob_bar in sizes_b:
        alice_bar = bob_bar + shift
        if alice_bar in sizes_a:
            return (alice_bar, bob_bar)


print(fairCandySwap(A=[1, 1], B=[2, 2]))
print(fairCandySwap(A=[1, 2], B=[2, 3]))
print(fairCandySwap(A=[2], B=[1, 3]))
print(fairCandySwap(A=[1, 2, 5], B=[2, 4]))
|
flexible
|
{
"blob_id": "9abc5f18e2eb07afe6bc31d6bd27298350707d1d",
"index": 962,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\nprint(fairCandySwap(A=[1, 1], B=[2, 2]))\nprint(fairCandySwap(A=[1, 2], B=[2, 3]))\nprint(fairCandySwap(A=[2], B=[1, 3]))\nprint(fairCandySwap(A=[1, 2, 5], B=[2, 4]))\n",
"step-4": "\"\"\"\n爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。\n\n因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)\n\n返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。\n\n如果有多个答案,你可以返回其中任何一个。保证答案存在。\n\"\"\"\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA -sumB) // 2\n for j in setB:\n if j + delta in setA:\n return (j+delta, j)\n\nprint(fairCandySwap(A = [1,1], B = [2,2]))\nprint(fairCandySwap(A = [1,2], B = [2,3]))\nprint(fairCandySwap(A = [2], B = [1,3]))\nprint(fairCandySwap(A = [1,2,5], B = [2,4]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-26 16:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give Deck.description an empty-string default."""

    # Must run after the previous api-app migration.
    dependencies = [
        ('api', '0002_auto_20170308_1949'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deck',
            name='description',
            # TextField with '' default; presumably replacing a nullable or
            # differently-defaulted field -- confirm against 0002's state.
            field=models.TextField(default=''),
        ),
    ]
|
normal
|
{
"blob_id": "bf3b529f8f06619c94d2dfca283df086466af4ea",
"index": 5027,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0002_auto_20170308_1949')]\n operations = [migrations.AlterField(model_name='deck', name=\n 'description', field=models.TextField(default=''))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0002_auto_20170308_1949')]\n operations = [migrations.AlterField(model_name='deck', name=\n 'description', field=models.TextField(default=''))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-03-26 16:51\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0002_auto_20170308_1949'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='deck',\n name='description',\n field=models.TextField(default=''),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def quartiles(values):
    """Return (Q1, Q2, Q3) of *values*.

    Sorts the list in place. For odd-length input the median element is
    excluded from the upper half before computing Q3 (exclusive method).
    """
    size = len(values)
    values.sort()
    mid = size // 2
    Q2 = median(values)
    Q1 = median(values[:mid])
    if size % 2:
        Q3 = median(values[mid + 1:])
    else:
        Q3 = median(values[mid:])
    return Q1, Q2, Q3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def median(values):
    """Return the integer median: middle element for odd length, truncated
    mean of the middle pair for even length."""
    ordered = sorted(values)
    size = len(ordered)
    mid = size // 2
    if size % 2 == 1:
        return ordered[mid]
    # int() truncation matches the expected integer output format.
    return int((ordered[mid - 1] + ordered[mid]) / 2)


def quartiles(values):
    """Return (Q1, Q2, Q3); sorts *values* in place.

    Odd-length input excludes the median itself from the upper half.
    """
    size = len(values)
    values.sort()
    mid = size // 2
    Q2 = median(values)
    Q1 = median(values[:mid])
    upper_start = mid + 1 if size % 2 else mid
    Q3 = median(values[upper_start:])
    return Q1, Q2, Q3
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def median(values):
    """Integer median of *values* (does not mutate its argument)."""
    data = sorted(values)
    size = len(data)
    if size % 2 == 1:
        return data[size // 2]
    lo, hi = data[size // 2 - 1], data[size // 2]
    # Truncate the average, matching the integer output convention.
    return int((lo + hi) / 2)


def quartiles(values):
    """Return (Q1, Q2, Q3) of *values*; sorts the list in place.

    For odd length the median element belongs to neither half (exclusive).
    """
    size = len(values)
    values.sort()
    lower = values[:size // 2]
    upper = values[size // 2 + (size % 2):]
    return median(lower), median(values), median(upper)
<|reserved_special_token_0|>
print(Q1)
print(Q2)
print(Q3)
<|reserved_special_token_1|>
# Read the (unused) element count, then the space-separated integer values.
n = input()
vals = list(map(int, input().split()))


def median(values):
    """Integer median: middle element, or truncated mean of the middle pair."""
    ordered = sorted(values)
    size = len(ordered)
    mid = size // 2
    if size % 2 == 1:
        return ordered[mid]
    return int((ordered[mid - 1] + ordered[mid]) / 2)


def quartiles(values):
    """Return (Q1, Q2, Q3); sorts in place; odd n excludes the median from
    the upper half (exclusive quartile method)."""
    size = len(values)
    values.sort()
    mid = size // 2
    lower = values[:mid]
    upper = values[mid + (size % 2):]
    return median(lower), median(values), median(upper)


Q1, Q2, Q3 = quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Read the (unused) element count, then the space-separated values.
n= input()
vals= list(map(int,input().split()))

def median(values):
    """Return the integer median (truncated mean of the middle pair for even n)."""
    n=len(values)
    values = sorted(values)
    if n%2==1:
        return values[(n+1)//2 - 1]
    else:
        # int() truncation matches the expected integer output format.
        return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)

def quartiles(values):
    """Return (Q1, Q2, Q3); sorts *values* in place.

    For odd n, the median element is excluded from the upper half (Q3).
    """
    n=len(values)
    values.sort()
    Q2=median(values)
    Q1=median(values[:int(n/2)])
    #print ("values=",values)
    if n%2==0:
        Q3=median(values[int(n/2):])
    else:
        Q3=median(values[int(n/2+1):])
    return Q1,Q2,Q3

Q1,Q2,Q3=quartiles(vals)
print(Q1)
print(Q2)
print(Q3)
|
flexible
|
{
"blob_id": "9d6b5baa8462b2996e4518dd39b5bb1efde1fd9d",
"index": 894,
"step-1": "<mask token>\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\n<mask token>\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-4": "n = input()\nvals = list(map(int, input().split()))\n\n\ndef median(values):\n n = len(values)\n values = sorted(values)\n if n % 2 == 1:\n return values[(n + 1) // 2 - 1]\n else:\n return int(sum(values[int(n / 2 - 1):int(n / 2 + 1)]) / 2)\n\n\ndef quartiles(values):\n n = len(values)\n values.sort()\n Q2 = median(values)\n Q1 = median(values[:int(n / 2)])\n if n % 2 == 0:\n Q3 = median(values[int(n / 2):])\n else:\n Q3 = median(values[int(n / 2 + 1):])\n return Q1, Q2, Q3\n\n\nQ1, Q2, Q3 = quartiles(vals)\nprint(Q1)\nprint(Q2)\nprint(Q3)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# Enter your code here. Read input from STDIN. Print output to STDOUT\r\n\r\nn= input()\r\nvals= list(map(int,input().split()))\r\n\r\ndef median(values):\r\n n=len(values)\r\n values = sorted(values)\r\n if n%2==1:\r\n return values[(n+1)//2 - 1]\r\n else:\r\n return int(sum(values[int((n/2)-1):int((n/2)+1)])/2)\r\n \r\ndef quartiles(values):\r\n n=len(values)\r\n values.sort()\r\n Q2=median(values)\r\n Q1=median(values[:int(n/2)])\r\n #print (\"values=\",values)\r\n\r\n if n%2==0:\r\n Q3=median(values[int(n/2):]) \r\n\r\n else:\r\n Q3=median(values[int(n/2+1):])\r\n \r\n return Q1,Q2,Q3\r\n\r\nQ1,Q2,Q3=quartiles(vals)\r\n\r\nprint(Q1)\r\nprint(Q2)\r\nprint(Q3)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# This is a module
# This is a module
class MyMath:
    """Small collection of integer predicates, called on the class itself."""

    @staticmethod
    def isEven(num):
        """Return True if num is divisible by 2."""
        return num % 2 == 0

    @staticmethod
    def isOdd(num):
        """Return True if num is not divisible by 2."""
        return num % 2 != 0

    @staticmethod
    def isPrime(num):
        """Return True if num is a prime number.

        Fix: the original returned True for 0, 1 and negatives because the
        trial-division loop never executed for them; numbers below 2 are
        not prime. Trial division now stops at sqrt(num).
        """
        if num < 2:
            return False
        for i in range(2, int(num ** 0.5) + 1):
            if num % i == 0:
                return False
        return True
class Calsi:
    """Minimal calculator namespace.

    Methods are decorated with @staticmethod (consistent with MyMath):
    without it an *instance* call like Calsi().add(1, 2) would bind the
    instance to num1 and compute the wrong result. Class-level calls
    (Calsi.add(1, 2)) behave exactly as before.
    """

    @staticmethod
    def add(num1, num2):
        """Return num1 + num2."""
        return num1 + num2

    @staticmethod
    def sub(num1, num2):
        """Return num1 - num2."""
        return num1 - num2

    @staticmethod
    def mul(num1, num2):
        """Return num1 * num2."""
        return num1 * num2
|
normal
|
{
"blob_id": "20d363f5d02cc0b1069aa8951999c0cb22b85613",
"index": 7578,
"step-1": "class MyMath:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Calsi:\n\n def add(num1, num2):\n return num1 + num2\n\n def sub(num1, num2):\n return num1 - num2\n\n def mul(num1, num2):\n return num1 * num2\n",
"step-2": "class MyMath:\n <mask token>\n\n def isOdd(num):\n if num % 2 == 0:\n return False\n return True\n <mask token>\n\n\nclass Calsi:\n\n def add(num1, num2):\n return num1 + num2\n\n def sub(num1, num2):\n return num1 - num2\n\n def mul(num1, num2):\n return num1 * num2\n",
"step-3": "class MyMath:\n <mask token>\n\n def isOdd(num):\n if num % 2 == 0:\n return False\n return True\n\n def isPrime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True\n\n\nclass Calsi:\n\n def add(num1, num2):\n return num1 + num2\n\n def sub(num1, num2):\n return num1 - num2\n\n def mul(num1, num2):\n return num1 * num2\n",
"step-4": "class MyMath:\n\n def isEven(num):\n if num % 2 == 0:\n return True\n return False\n\n def isOdd(num):\n if num % 2 == 0:\n return False\n return True\n\n def isPrime(num):\n for i in range(2, num):\n if num % i == 0:\n return False\n return True\n\n\nclass Calsi:\n\n def add(num1, num2):\n return num1 + num2\n\n def sub(num1, num2):\n return num1 - num2\n\n def mul(num1, num2):\n return num1 * num2\n",
"step-5": "# This is a module\n\nclass MyMath:\n def isEven(num):\n if(num%2==0):\n return True\n return False\n \n def isOdd(num):\n if(num%2==0):\n return False\n return True\n \n def isPrime(num):\n for i in range(2,num):\n if num%i==0:\n return False\n return True\n \nclass Calsi:\n def add(num1, num2):\n return num1+num2\n \n def sub(num1, num2):\n return num1-num2\n \n def mul(num1,num2):\n return num1*num2\n ",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def test_lasso():
    """Build the preprocessed ASHRAE test-set feature matrix for the Lasso model.

    Reads test.csv, building_metadata.csv and weather_test.csv, merges them,
    engineers hour/weekday/week features, drops unused columns, interpolates
    missing values, keeps only the 22:00 and 23:00 observations, and one-hot
    encodes building_id and meter (dropping each first level).

    Returns:
        pandas.DataFrame indexed by row_id, ready for mod_lasso.predict().
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort so interpolate() fills gaps within each building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    test = test.merge(building_metadata, on='building_id', how='left').merge(
        weather_test, on=['site_id', 'timestamp'], how='left')
    # Free the merge inputs early; the merged frame is large.
    del building_metadata
    del weather_test
    # Calendar features. NOTE(review): Series.dt.weekofyear is deprecated in
    # newer pandas; dt.isocalendar().week is the replacement (dtype differs).
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.weekofyear
    # Columns the model was not trained on.
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 per-hour drop calls, each of which rescanned the whole frame.
    test.drop(test[test.hour < 22].index, inplace=True)
    # One-hot encode the categoricals, dropping the first level of each.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # Assumes every building after the first occurs in the test set and all
    # four meter types appear -- TODO confirm against encode.categories_.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',
        'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    # reset_index so the positional join with encode_var lines up row-by-row.
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    test.set_index('row_id', inplace=True)
    return test
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lasso():
    """Build the preprocessed ASHRAE test-set feature matrix for the Lasso model.

    Reads test.csv, building_metadata.csv and weather_test.csv, merges them,
    engineers hour/weekday/week features, drops unused columns, interpolates
    missing values, keeps only the 22:00 and 23:00 observations, and one-hot
    encodes building_id and meter (dropping each first level).

    Returns:
        pandas.DataFrame indexed by row_id, ready for mod_lasso.predict().
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort so interpolate() fills gaps within each building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    test = test.merge(building_metadata, on='building_id', how='left').merge(
        weather_test, on=['site_id', 'timestamp'], how='left')
    # Free the merge inputs early; the merged frame is large.
    del building_metadata
    del weather_test
    # Calendar features. NOTE(review): Series.dt.weekofyear is deprecated in
    # newer pandas; dt.isocalendar().week is the replacement (dtype differs).
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.weekofyear
    # Columns the model was not trained on.
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 per-hour drop calls, each of which rescanned the whole frame.
    test.drop(test[test.hour < 22].index, inplace=True)
    # One-hot encode the categoricals, dropping the first level of each.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # Assumes every building after the first occurs in the test set and all
    # four meter types appear -- TODO confirm against encode.categories_.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',
        'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    # reset_index so the positional join with encode_var lines up row-by-row.
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    test.set_index('row_id', inplace=True)
    return test
<|reserved_special_token_0|>
print(X_test.head())
<|reserved_special_token_0|>
sub.sort_values(by='row_id', inplace=True)
sub.to_csv('./submission12.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_lasso():
    """Build the preprocessed ASHRAE test-set feature matrix for the Lasso model.

    Reads test.csv, building_metadata.csv and weather_test.csv, merges them,
    engineers hour/weekday/week features, drops unused columns, interpolates
    missing values, keeps only the 22:00 and 23:00 observations, and one-hot
    encodes building_id and meter (dropping each first level).

    Returns:
        pandas.DataFrame indexed by row_id, ready for mod_lasso.predict().
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort so interpolate() fills gaps within each building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    test = test.merge(building_metadata, on='building_id', how='left').merge(
        weather_test, on=['site_id', 'timestamp'], how='left')
    # Free the merge inputs early; the merged frame is large.
    del building_metadata
    del weather_test
    # Calendar features. NOTE(review): Series.dt.weekofyear is deprecated in
    # newer pandas; dt.isocalendar().week is the replacement (dtype differs).
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.weekofyear
    # Columns the model was not trained on.
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 per-hour drop calls, each of which rescanned the whole frame.
    test.drop(test[test.hour < 22].index, inplace=True)
    # One-hot encode the categoricals, dropping the first level of each.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # Assumes every building after the first occurs in the test set and all
    # four meter types appear -- TODO confirm against encode.categories_.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',
        'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    # reset_index so the positional join with encode_var lines up row-by-row.
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    test.set_index('row_id', inplace=True)
    return test
<|reserved_special_token_0|>
# Load the previously trained Lasso model and score the test set.
mod_lasso = load('mod_lasso.joblib')
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())
# Clamp negative predictions to 0: meter readings cannot be negative.
sub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[
    'meter_reading'])
sub.sort_values(by='row_id', inplace=True)
sub.to_csv('./submission12.csv')
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso
def test_lasso():
    """Build the preprocessed ASHRAE test-set feature matrix for the Lasso model.

    Reads test.csv, building_metadata.csv and weather_test.csv, merges them,
    engineers hour/weekday/week features, drops unused columns, interpolates
    missing values, keeps only the 22:00 and 23:00 observations, and one-hot
    encodes building_id and meter (dropping each first level).

    Returns:
        pandas.DataFrame indexed by row_id, ready for mod_lasso.predict().
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort so interpolate() fills gaps within each building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    test = test.merge(building_metadata, on='building_id', how='left').merge(
        weather_test, on=['site_id', 'timestamp'], how='left')
    # Free the merge inputs early; the merged frame is large.
    del building_metadata
    del weather_test
    # Calendar features. NOTE(review): Series.dt.weekofyear is deprecated in
    # newer pandas; dt.isocalendar().week is the replacement (dtype differs).
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.weekofyear
    # Columns the model was not trained on.
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 per-hour drop calls, each of which rescanned the whole frame.
    test.drop(test[test.hour < 22].index, inplace=True)
    # One-hot encode the categoricals, dropping the first level of each.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # Assumes every building after the first occurs in the test set and all
    # four meter types appear -- TODO confirm against encode.categories_.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',
        'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    # reset_index so the positional join with encode_var lines up row-by-row.
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    test.set_index('row_id', inplace=True)
    return test
from joblib import dump, load
# Load the previously trained Lasso model and score the test set.
mod_lasso = load('mod_lasso.joblib')
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())
# Clamp negative predictions to 0: meter readings cannot be negative.
sub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[
    'meter_reading'])
sub.sort_values(by='row_id', inplace=True)
sub.to_csv('./submission12.csv')
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso
def test_lasso():
    """Build the preprocessed ASHRAE test-set feature matrix for the Lasso model.

    Reads test.csv, building_metadata.csv and weather_test.csv, merges them,
    engineers hour/weekday/week features, drops unused columns, interpolates
    missing values, keeps only the 22:00 and 23:00 observations, and one-hot
    encodes building_id and meter (dropping each first level).

    Returns:
        pandas.DataFrame indexed by row_id, ready for mod_lasso.predict().
    """
    test = pd.read_csv('./data/test.csv')
    building_metadata = pd.read_csv('./data/building_metadata.csv')
    weather_test = pd.read_csv('./data/weather_test.csv')
    # Sort data for future imputation: interpolate() then fills gaps within
    # each building's own time series.
    test.sort_values(by=['building_id', 'timestamp'], inplace=True)
    # Merging data
    test = (test
            .merge(building_metadata, on='building_id', how='left')
            .merge(weather_test, on=['site_id', 'timestamp'], how='left'))
    del building_metadata
    del weather_test
    # Add dates variables. NOTE(review): Series.dt.weekofyear is deprecated
    # in newer pandas; dt.isocalendar().week is the replacement.
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.weekofyear
    # Eliminate problematic variables (not used by the trained model).
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
    # Imputation
    test = test.interpolate()
    # Keep only hours 22 and 23: one vectorized drop replaces the original
    # 22 per-hour drop calls, each of which rescanned the whole frame.
    test.drop(test[test.hour < 22].index, inplace=True)
    # One Hot Encoding, dropping the first level of each categorical.
    encode = OneHotEncoder(categories='auto', drop='first')
    catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()
    catego_var = encode.fit_transform(catego_var).toarray()
    # Assumes every building after the first occurs in the test set and all
    # four meter types appear -- TODO confirm against encode.categories_.
    encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',
        'meter_2', 'meter_3']
    encode_var = pd.DataFrame(catego_var, columns=encode_names)
    test.drop('meter', inplace=True, axis=1)
    # reset_index so the positional join with encode_var lines up row-by-row.
    test.reset_index(drop=True, inplace=True)
    test = test.join(encode_var)
    # Add row as set_index
    test.set_index('row_id', inplace=True)
    return test
# Training (done offline): the fitted Lasso model was persisted to
# 'mod_lasso.joblib', so the commented-out lines below are kept only as a
# record of how it was produced.
#X_train, y_train = train_lasso()

#mod_lasso = Lasso()
#mod_lasso.fit(X_train, y_train)

#print(mod_lasso.coef_)
from joblib import dump, load
# Load the previously fitted Lasso model from disk.
mod_lasso = load('mod_lasso.joblib')


# Build the preprocessed test design matrix (indexed by row_id, see test_lasso).
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())

# Meter readings cannot be negative, so clip predictions at zero before saving.
sub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])
# Kaggle-style submissions are expected in row_id order.
sub.sort_values(by = 'row_id', inplace = True)
sub.to_csv('./submission12.csv')
|
flexible
|
{
"blob_id": "6028b46eab422dea02af24e9cf724fe0d8b3ecc4",
"index": 9531,
"step-1": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n 
test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n 
test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nprint(X_test.head())\n<mask token>\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-3": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n 
test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n 
test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n\n # Sort data for future imputation\n test.sort_values(by=['building_id','timestamp'], inplace=True)\n\n # Merging data\n test = (test\n .merge(building_metadata, on = 'building_id', how='left')\n .merge(weather_test, on = ['site_id','timestamp'], how='left'))\n\n del building_metadata\n del weather_test\n\n #Add dates variables\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n\n #Eliminate problematic variables\n test.drop(['timestamp','year_built','floor_count','cloud_coverage','site_id','primary_use','wind_direction','square_feet','dew_temperature','sea_level_pressure','wind_speed','precip_depth_1_hr'], inplace=True, axis = 1)\n\n # Imputation\n test = test.interpolate()\n test.drop(test[test.hour==0].index, inplace=True)\n test.drop(test[test.hour==1].index, inplace=True)\n test.drop(test[test.hour==2].index, inplace=True)\n test.drop(test[test.hour==3].index, inplace=True)\n test.drop(test[test.hour==4].index, inplace=True)\n test.drop(test[test.hour==5].index, inplace=True)\n test.drop(test[test.hour==6].index, inplace=True)\n test.drop(test[test.hour==7].index, inplace=True)\n test.drop(test[test.hour==8].index, inplace=True)\n test.drop(test[test.hour==9].index, inplace=True)\n test.drop(test[test.hour==10].index, inplace=True)\n test.drop(test[test.hour==11].index, inplace=True)\n 
test.drop(test[test.hour==12].index, inplace=True)\n test.drop(test[test.hour==13].index, inplace=True)\n test.drop(test[test.hour==14].index, inplace=True)\n test.drop(test[test.hour==15].index, inplace=True)\n test.drop(test[test.hour==16].index, inplace=True)\n test.drop(test[test.hour==17].index, inplace=True)\n test.drop(test[test.hour==18].index, inplace=True)\n test.drop(test[test.hour==19].index, inplace=True)\n test.drop(test[test.hour==20].index, inplace=True)\n test.drop(test[test.hour==21].index, inplace=True)\n\n # One Hot Encoding\n\n encode = OneHotEncoder(categories='auto',drop = 'first')\n catego_var = test.loc[:,['building_id','meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1','meter_2','meter_3']\n encode_var = pd.DataFrame(catego_var, columns = encode_names)\n\n test.drop('meter', inplace=True, axis = 1)\n test.reset_index(drop=True,inplace=True)\n test = test.join(encode_var)\n\n # Add row as set_index\n test.set_index('row_id', inplace=True)\n\n return test\n\n\n\n#X_train, y_train = train_lasso()\n\n#mod_lasso = Lasso()\n#mod_lasso.fit(X_train, y_train)\n\n#print(mod_lasso.coef_)\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib') \n\n\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\n\nsub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])\nsub.sort_values(by = 'row_id', inplace = True)\nsub.to_csv('./submission12.csv')",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse(num):
    """Return the English words for a non-negative integer below one billion.

    Example:
        parse(23456789) ->
        'twenty three million four hundred and fifty six thousand seven hundred and eighty nine'

    Fixes over the original: the word list is now returned (it was built and
    discarded), the teens/tens branching is corrected (a tens digit of 0 no
    longer produces 'ten', and 10-19 use their proper names), and the
    thousands/hundreds groups are actually read instead of being ignored.
    Uses British style, with 'and' after 'hundred'.
    """
    units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
             'eight', 'nine']
    teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
             'sixteen', 'seventeen', 'eighteen', 'nineteen']
    tens = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',
            'sixty', 'seventy', 'eighty', 'ninety']

    def group_words(group):
        # Words for one 1-999 group; assumes group > 0.
        words = []
        hundreds_digit, rem = divmod(group, 100)
        if hundreds_digit:
            words.append(units[hundreds_digit])
            words.append('hundred')
            if rem:
                words.append('and')
        if 10 <= rem <= 19:
            # 10-19 have their own names and swallow the units digit.
            words.append(teens[rem - 10])
        else:
            tens_digit, units_digit = divmod(rem, 10)
            if tens_digit:
                words.append(tens[tens_digit])
            if units_digit:
                words.append(units[units_digit])
        return words

    if num == 0:
        return 'zero'
    reads = []
    millions, rest = divmod(num, 1000000)
    thousands, last_group = divmod(rest, 1000)
    if millions:
        reads.extend(group_words(millions))
        reads.append('million')
    if thousands:
        reads.extend(group_words(thousands))
        reads.append('thousand')
    if last_group:
        if reads and last_group < 100:
            # e.g. 'one thousand and five'
            reads.append('and')
        reads.extend(group_words(last_group))
    return ' '.join(reads)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def parse(num):
    """Return the English words for a non-negative integer below one billion.

    Example:
        parse(23456789) ->
        'twenty three million four hundred and fifty six thousand seven hundred and eighty nine'

    Fixes over the original: the word list is now returned (it was built and
    discarded), the teens/tens branching is corrected (a tens digit of 0 no
    longer produces 'ten', and 10-19 use their proper names), and the
    thousands/hundreds groups are actually read instead of being ignored.
    Uses British style, with 'and' after 'hundred'.
    """
    units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
             'eight', 'nine']
    teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
             'sixteen', 'seventeen', 'eighteen', 'nineteen']
    tens = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',
            'sixty', 'seventy', 'eighty', 'ninety']

    def group_words(group):
        # Words for one 1-999 group; assumes group > 0.
        words = []
        hundreds_digit, rem = divmod(group, 100)
        if hundreds_digit:
            words.append(units[hundreds_digit])
            words.append('hundred')
            if rem:
                words.append('and')
        if 10 <= rem <= 19:
            # 10-19 have their own names and swallow the units digit.
            words.append(teens[rem - 10])
        else:
            tens_digit, units_digit = divmod(rem, 10)
            if tens_digit:
                words.append(tens[tens_digit])
            if units_digit:
                words.append(units[units_digit])
        return words

    if num == 0:
        return 'zero'
    reads = []
    millions, rest = divmod(num, 1000000)
    thousands, last_group = divmod(rest, 1000)
    if millions:
        reads.extend(group_words(millions))
        reads.append('million')
    if thousands:
        reads.extend(group_words(thousands))
        reads.append('thousand')
    if last_group:
        if reads and last_group < 100:
            # e.g. 'one thousand and five'
            reads.append('and')
        reads.extend(group_words(last_group))
    return ' '.join(reads)


if __name__ == '__main__':
    print(parse(23456789))
<|reserved_special_token_1|>
def parse(num):
    """Return the English words for a non-negative integer below one billion.

    Example:
        parse(23456789) ->
        'twenty three million four hundred and fifty six thousand seven hundred and eighty nine'

    Fixes over the original: the word list is now returned (it was built and
    discarded), the teens/tens branching is corrected (a tens digit of 0 no
    longer produces 'ten', and 10-19 use their proper names), and the
    thousands/hundreds groups are actually read instead of being ignored.
    Uses British style, with 'and' after 'hundred'.
    """
    units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
             'eight', 'nine']
    teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',
             'sixteen', 'seventeen', 'eighteen', 'nineteen']
    tens = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',
            'sixty', 'seventy', 'eighty', 'ninety']

    def group_words(group):
        # Words for one 1-999 group; assumes group > 0.
        words = []
        hundreds_digit, rem = divmod(group, 100)
        if hundreds_digit:
            words.append(units[hundreds_digit])
            words.append('hundred')
            if rem:
                words.append('and')
        if 10 <= rem <= 19:
            # 10-19 have their own names and swallow the units digit.
            words.append(teens[rem - 10])
        else:
            tens_digit, units_digit = divmod(rem, 10)
            if tens_digit:
                words.append(tens[tens_digit])
            if units_digit:
                words.append(units[units_digit])
        return words

    if num == 0:
        return 'zero'
    reads = []
    millions, rest = divmod(num, 1000000)
    thousands, last_group = divmod(rest, 1000)
    if millions:
        reads.extend(group_words(millions))
        reads.append('million')
    if thousands:
        reads.extend(group_words(thousands))
        reads.append('thousand')
    if last_group:
        if reads and last_group < 100:
            # e.g. 'one thousand and five'
            reads.append('and')
        reads.extend(group_words(last_group))
    return ' '.join(reads)


if __name__ == "__main__":
    print(parse(23456789))
|
flexible
|
{
"blob_id": "843901b65a556e57470f73be2657e9fd3c0facc6",
"index": 9721,
"step-1": "<mask token>\n",
"step-2": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\n<mask token>\n",
"step-3": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\nif __name__ == '__main__':\n parse(23456789)\n",
"step-4": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum)-1, -1, -1):\n words.append(strnum[item])\n\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n\n units = ['zero','one','two','three','four','five','six','seven','eight','nine']\n tens = ['ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']\n tens_more = ['zero','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']\n\n reads = []\n if len(million)>0:\n if len(million)==3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n\n num = int(million[1])\n if num>1:\n reads.append(tens_more[num])\n if num!=0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n\n if len(million)==2:\n num = int(million[0])\n if num>1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num!=0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n \n if len(million)==1:\n num = int(million[0])\n reads.append(units[num])\n\n reads.append('million')\n reads.append('and')\n\nif __name__ == \"__main__\":\n parse(23456789)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
''' Model package should containt all data types for the database engine,
which means that projects like PyCIM can be included within '''
|
normal
|
{
"blob_id": "ce3c1a7210632d0a8475fe886d514eb91d3c75ac",
"index": 7700,
"step-1": "<mask token>\n",
"step-2": "''' Model package should containt all data types for the database engine, \nwhich means that projects like PyCIM can be included within '''",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(datetime.now().date() + timedelta(days=dd - nn))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Day-of-week number entered by the user (compared against nn below, so it
# is expected on the same 1-based, Sunday-first scale).
dd = int(input('enter number day: '))
# Today's weekday on that scale: strftime('%w') is 0 (Sunday) .. 6 (Saturday),
# so add 1.
nn = int(datetime.now().strftime('%w')) + 1
# Shift today's date by the difference between the two day numbers to get the
# date, within the current week, that falls on the requested day.
print(datetime.now().date() + timedelta(days=dd - nn))
<|reserved_special_token_1|>
# Print the date, in the current week, of the day number the user enters
# (1 = Sunday ... 7 = Saturday, the same scale as strftime('%w') + 1).
# Fix over the original: the wildcard 'from datetime import *' is replaced
# with explicit imports of the two names actually used.
from datetime import datetime, timedelta

# Requested day number, 1-based and Sunday-first.
dd = int(input('enter number day: '))
# Today's weekday on the same scale: '%w' yields 0 (Sunday) .. 6 (Saturday).
nn = int(datetime.now().strftime('%w')) + 1
# Shift today's date by the difference between the two day numbers.
print(datetime.now().date() + timedelta(days=dd - nn))
<|reserved_special_token_1|>
# Print the date, in the current week, of the day number the user enters
# (1 = Sunday ... 7 = Saturday, the same scale as strftime('%w') + 1).
# Fixes over the original: the wildcard 'from datetime import *' is replaced
# with explicit imports, and the commented-out debug prints are removed.
from datetime import datetime, timedelta

# Requested day number, 1-based and Sunday-first.
dd = int(input("enter number day: "))
# Today's weekday on the same scale: '%w' yields 0 (Sunday) .. 6 (Saturday).
nn = int(datetime.now().strftime("%w")) + 1
# Shift today's date by the difference between the two day numbers.
print(datetime.now().date() + timedelta(days=dd - nn))
|
flexible
|
{
"blob_id": "d3342507cb1966e14380ff28ae12b5c334abd20a",
"index": 5430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-3": "<mask token>\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-4": "from datetime import *\ndd = int(input('enter number day: '))\nnn = int(datetime.now().strftime('%w')) + 1\nprint(datetime.now().date() + timedelta(days=dd - nn))\n",
"step-5": "from datetime import *\ndd=int(input(\"enter number day: \"))\nnn=int(datetime.now().strftime(\"%w\"))+1\n# print(dd)\n# print(nn)\nprint((datetime.now().date())+(timedelta(days=dd-nn)))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Print list of files and directories
import os
def file_list(dir):
    """Recursively print every file path under *dir*.

    Files in a directory are printed before any of its subdirectories are
    descended into; entries are visited in os.listdir() order.
    """
    pending = []
    for name in os.listdir(dir):
        path = os.path.join(dir, name)
        if os.path.isdir(path):
            # Defer subdirectories until every file at this level is printed.
            pending.append(path)
        else:
            print(path)
    for subdir in pending:
        file_list(subdir)
# Fix: the original literal 'D:\Workspace\test\PythonProject' contained an
# embedded TAB character, because '\t' is an escape sequence in a normal
# string (and '\W', '\P' are invalid escapes). A raw string keeps every
# backslash literal so the intended Windows path is used.
file_list(r'D:\Workspace\test\PythonProject')
|
normal
|
{
"blob_id": "051544f41cc3c7d78210076cb9720866924ea2a1",
"index": 2942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef file_list(dir):\n subdir_list = []\n for item in os.listdir(dir):\n fullpath = os.path.join(dir, item)\n if os.path.isdir(fullpath):\n subdir_list.append(fullpath)\n else:\n print(fullpath)\n for d in subdir_list:\n file_list(d)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef file_list(dir):\n subdir_list = []\n for item in os.listdir(dir):\n fullpath = os.path.join(dir, item)\n if os.path.isdir(fullpath):\n subdir_list.append(fullpath)\n else:\n print(fullpath)\n for d in subdir_list:\n file_list(d)\n\n\nfile_list('D:\\\\Workspace\\test\\\\PythonProject')\n",
"step-4": "import os\n\n\ndef file_list(dir):\n subdir_list = []\n for item in os.listdir(dir):\n fullpath = os.path.join(dir, item)\n if os.path.isdir(fullpath):\n subdir_list.append(fullpath)\n else:\n print(fullpath)\n for d in subdir_list:\n file_list(d)\n\n\nfile_list('D:\\\\Workspace\\test\\\\PythonProject')\n",
"step-5": "# Print list of files and directories\nimport os\n\ndef file_list(dir):\n subdir_list = []\n for item in os.listdir(dir):\n fullpath = os.path.join(dir,item)\n if os.path.isdir(fullpath):\n subdir_list.append(fullpath)\n else:\n print(fullpath)\n\n for d in subdir_list:\n file_list(d)\n\nfile_list('D:\\Workspace\\test\\PythonProject')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
Holds the AWS SNS email service that can be used to send emails.
"""
import boto3
import os
import cla
import uuid
import json
import datetime
from cla.models import email_service_interface
# Deployment configuration read from the environment at import time; each
# value falls back to the empty string when the variable is unset.
region = os.environ.get('REGION', '')
# Address placed in the message's 'from' field (env name suggests a verified
# SES sender — confirm against the deployment).
sender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')
# SNS topic that outgoing email events are published to.
topic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')
class SNS(email_service_interface.EmailService):
    """
    AWS SNS email client model.

    Wraps each email in a JSON event and publishes it to an SNS topic
    (delivery is presumably handled by a consumer of that topic — confirm
    against the deployment).
    """

    def __init__(self):
        self.region = None
        self.sender_email = None
        self.topic_arn = None

    def initialize(self, config):
        # Values come from the module-level environment lookups; the config
        # argument is accepted only to satisfy the EmailService interface.
        self.region = region
        self.sender_email = sender_email_address
        self.topic_arn = topic_arn

    def send(self, subject, body, recipient, attachment=None):
        payload = self.get_email_message(subject, body, self.sender_email,
                                         recipient, attachment)
        # Publish via SNS; delivery errors are logged, never raised.
        client = self._get_connection()
        try:
            self._send(client, payload)
        except Exception as err:
            cla.log.error('Error while sending AWS SNS email to %s: %s',
                          recipient, str(err))

    def _get_connection(self):
        """
        Mockable method to get a connection to the SNS service.
        """
        return boto3.client('sns', region_name=self.region)

    def _send(self, connection, msg):  # pylint: disable=no-self-use
        """
        Mockable send method.
        """
        connection.publish(TopicArn=self.topic_arn, Message=msg)

    def get_email_message(self, subject, body, sender, recipients, attachment=None):  # pylint: disable=too-many-arguments
        """
        Build the JSON event payload for one outgoing email.

        :param subject: The email subject
        :type subject: string
        :param body: The email body
        :type body: string
        :param sender: The sender email address
        :type sender: string
        :param recipients: A single address or a list of addresses
        :type recipients: string or list of string
        :param attachment: Accepted for interface compatibility; not read here
        :type attachment: dict
        :return: The event serialized as a JSON string
        :rtype: string
        """
        # A bare string is treated as a one-element recipient list.
        recipient_list = [recipients] if isinstance(recipients, str) else recipients
        # MailChimp/Mandrill support: a named template is referenced and the
        # body is repeated under the BODY template parameter.
        data = {
            'body': body,
            'from': sender,
            'subject': subject,
            'type': 'cla-email-event',
            'recipients': recipient_list,
            'template_name': 'EasyCLA System Email Template',
            'parameters': {'BODY': body},
        }
        source = {
            'client_id': 'easycla-service',
            'description': 'EasyCLA Service',
            'name': 'EasyCLA Service',
        }
        event = {
            'data': data,
            'source_id': source,
            'id': str(uuid.uuid4()),
            'type': 'cla-email-event',
            'version': '0.1.0',
        }
        return json.dumps(event)
class MockSNS(SNS):
    """
    Mockable AWS SNS email client.

    Records outgoing message payloads in memory instead of publishing them,
    so tests can inspect what would have been sent.
    """

    def __init__(self):
        super().__init__()
        # JSON payloads captured by _send(), in send order.
        self.emails_sent = []

    def _get_connection(self):
        # The mock needs no real SNS client.
        return None

    def _send(self, connection, msg):
        # Capture instead of publishing.
        self.emails_sent.append(msg)
|
normal
|
{
"blob_id": "16dd73f2c85eff8d62cf0e605489d0db1616e36e",
"index": 8650,
"step-1": "<mask token>\n\n\nclass SNS(email_service_interface.EmailService):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-2": "<mask token>\n\n\nclass SNS(email_service_interface.EmailService):\n <mask token>\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n 
source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-3": "<mask token>\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n 
data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-4": "<mask token>\nimport boto3\nimport os\nimport cla\nimport uuid\nimport json\nimport datetime\nfrom cla.models import email_service_interface\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email,\n recipient, attachment)\n connection = self._get_connection()\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s',\n recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg):\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(TopicArn=self.topic_arn, Message=msg)\n\n def get_email_message(self, subject, body, sender, recipients,\n attachment=None):\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n data['body'] = body\n data['from'] = sender\n data['subject'] = 
subject\n data['type'] = 'cla-email-event'\n if isinstance(recipients, str):\n data['recipients'] = [recipients]\n else:\n data['recipients'] = recipients\n data['template_name'] = 'EasyCLA System Email Template'\n data['parameters'] = {'BODY': body}\n msg['data'] = data\n source['client_id'] = 'easycla-service'\n source['description'] = 'EasyCLA Service'\n source['name'] = 'EasyCLA Service'\n msg['source_id'] = source\n msg['id'] = str(uuid.uuid4())\n msg['type'] = 'cla-email-event'\n msg['version'] = '0.1.0'\n json_string = json.dumps(msg)\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-5": "# Copyright The Linux Foundation and each contributor to CommunityBridge.\n# SPDX-License-Identifier: MIT\n\n\"\"\"\nHolds the AWS SNS email service that can be used to send emails.\n\"\"\"\n\nimport boto3\nimport os\nimport cla\nimport uuid\nimport json\nimport datetime\nfrom cla.models import email_service_interface\n\nregion = os.environ.get('REGION', '')\nsender_email_address = os.environ.get('SES_SENDER_EMAIL_ADDRESS', '')\ntopic_arn = os.environ.get('SNS_EVENT_TOPIC_ARN', '')\n\n\nclass SNS(email_service_interface.EmailService):\n \"\"\"\n AWS SNS email client model.\n \"\"\"\n\n def __init__(self):\n self.region = None\n self.sender_email = None\n self.topic_arn = None\n\n def initialize(self, config):\n self.region = region\n self.sender_email = sender_email_address\n self.topic_arn = topic_arn\n\n def send(self, subject, body, recipient, attachment=None):\n msg = self.get_email_message(subject, body, self.sender_email, recipient, attachment)\n # Connect to SNS.\n connection = self._get_connection()\n # Send the email.\n try:\n self._send(connection, msg)\n except Exception as err:\n cla.log.error('Error while sending AWS SNS email to %s: %s', recipient, str(err))\n\n def _get_connection(self):\n \"\"\"\n Mockable method to get a connection to the SNS service.\n \"\"\"\n return boto3.client('sns', region_name=self.region)\n\n def _send(self, connection, msg): # pylint: disable=no-self-use\n \"\"\"\n Mockable send method.\n \"\"\"\n connection.publish(\n TopicArn=self.topic_arn,\n Message=msg,\n )\n\n def get_email_message(self, subject, body, sender, recipients, attachment=None): # pylint: disable=too-many-arguments\n \"\"\"\n Helper method to get a prepared email message given the subject,\n body, and recipient provided.\n\n :param subject: The email subject\n :type subject: string\n :param body: The email body\n :type body: string\n :param sender: The sender email\n :type sender: string\n :param recipients: An array of recipient email 
addresses\n :type recipient: string\n :param attachment: The attachment dict (see EmailService.send() documentation).\n :type: attachment: dict\n :return: The json message\n :rtype: string\n \"\"\"\n msg = {}\n source = {}\n data = {}\n\n data[\"body\"] = body\n data[\"from\"] = sender\n data[\"subject\"] = subject\n data[\"type\"] = \"cla-email-event\"\n if isinstance(recipients, str):\n data[\"recipients\"] = [recipients]\n else:\n data[\"recipients\"] = recipients\n # Added MailChip/Mandrill support by setting the template and adding\n # email body to the parameters list under the BODY attribute\n data[\"template_name\"] = \"EasyCLA System Email Template\"\n data[\"parameters\"] = {\n \"BODY\": body\n }\n\n msg[\"data\"] = data\n\n source[\"client_id\"] = \"easycla-service\"\n source[\"description\"] = \"EasyCLA Service\"\n source[\"name\"] = \"EasyCLA Service\"\n msg[\"source_id\"] = source\n\n msg[\"id\"] = str(uuid.uuid4())\n msg[\"type\"] = \"cla-email-event\"\n msg[\"version\"] = \"0.1.0\"\n json_string = json.dumps(msg)\n # cla.log.debug(f'Email JSON: {json_string}')\n return json_string\n\n\nclass MockSNS(SNS):\n \"\"\"\n Mockable AWS SNS email client.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.emails_sent = []\n\n def _get_connection(self):\n return None\n\n def _send(self, connection, msg):\n self.emails_sent.append(msg)\n",
"step-ids": [
6,
12,
14,
15,
16
]
}
|
[
6,
12,
14,
15,
16
] |
<|reserved_special_token_0|>
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
<|reserved_special_token_0|>
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
<|reserved_special_token_1|>
<|reserved_special_token_0|>
IGNORE_LABEL = 255
STATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':
{'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
<|reserved_special_token_1|>
import torch
import torchvision.transforms.functional as F
import numpy as np
import yaml
from pathlib import Path
IGNORE_LABEL = 255
STATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':
{'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
<|reserved_special_token_1|>
import torch
import torchvision.transforms.functional as F
import numpy as np
import yaml
from pathlib import Path
IGNORE_LABEL = 255
STATS = {
"vit": {"mean": (0.5, 0.5, 0.5), "std": (0.5, 0.5, 0.5)},
"deit": {"mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225)},
}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, "r"), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat["name"])
if "color" in cat:
colors[cat["id"]] = torch.tensor(cat["color"]).float() / 255
else:
colors[cat["id"]] = torch.tensor(cmap[cat["id"]]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \in [0, 1]
"""
return F.normalize(x, stats["mean"], stats["std"])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \in [-1, 1]
"""
mean = torch.tensor(stats["mean"])
std = torch.tensor(stats["std"])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
|
flexible
|
{
"blob_id": "6c641ace8f1e5e8c42fa776bd7604daf243f9a41",
"index": 2113,
"step-1": "<mask token>\n\n\ndef dataset_cat_description(path, cmap=None):\n desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)\n colors = {}\n names = []\n for i, cat in enumerate(desc):\n names.append(cat['name'])\n if 'color' in cat:\n colors[cat['id']] = torch.tensor(cat['color']).float() / 255\n else:\n colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()\n colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()\n return names, colors\n\n\n<mask token>\n\n\ndef rgb_denormalize(x, stats):\n \"\"\"\n x : N x C x *\n x \\\\in [-1, 1]\n \"\"\"\n mean = torch.tensor(stats['mean'])\n std = torch.tensor(stats['std'])\n for i in range(3):\n x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]\n return x\n",
"step-2": "<mask token>\n\n\ndef seg_to_rgb(seg, colors):\n im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()\n cls = torch.unique(seg)\n for cl in cls:\n color = colors[int(cl)]\n if len(color.shape) > 1:\n color = color[0]\n im[seg == cl] = color\n return im\n\n\ndef dataset_cat_description(path, cmap=None):\n desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)\n colors = {}\n names = []\n for i, cat in enumerate(desc):\n names.append(cat['name'])\n if 'color' in cat:\n colors[cat['id']] = torch.tensor(cat['color']).float() / 255\n else:\n colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()\n colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()\n return names, colors\n\n\ndef rgb_normalize(x, stats):\n \"\"\"\n x : C x *\n x \\\\in [0, 1]\n \"\"\"\n return F.normalize(x, stats['mean'], stats['std'])\n\n\ndef rgb_denormalize(x, stats):\n \"\"\"\n x : N x C x *\n x \\\\in [-1, 1]\n \"\"\"\n mean = torch.tensor(stats['mean'])\n std = torch.tensor(stats['std'])\n for i in range(3):\n x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]\n return x\n",
"step-3": "<mask token>\nIGNORE_LABEL = 255\nSTATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':\n {'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}\n\n\ndef seg_to_rgb(seg, colors):\n im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()\n cls = torch.unique(seg)\n for cl in cls:\n color = colors[int(cl)]\n if len(color.shape) > 1:\n color = color[0]\n im[seg == cl] = color\n return im\n\n\ndef dataset_cat_description(path, cmap=None):\n desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)\n colors = {}\n names = []\n for i, cat in enumerate(desc):\n names.append(cat['name'])\n if 'color' in cat:\n colors[cat['id']] = torch.tensor(cat['color']).float() / 255\n else:\n colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()\n colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()\n return names, colors\n\n\ndef rgb_normalize(x, stats):\n \"\"\"\n x : C x *\n x \\\\in [0, 1]\n \"\"\"\n return F.normalize(x, stats['mean'], stats['std'])\n\n\ndef rgb_denormalize(x, stats):\n \"\"\"\n x : N x C x *\n x \\\\in [-1, 1]\n \"\"\"\n mean = torch.tensor(stats['mean'])\n std = torch.tensor(stats['std'])\n for i in range(3):\n x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]\n return x\n",
"step-4": "import torch\nimport torchvision.transforms.functional as F\nimport numpy as np\nimport yaml\nfrom pathlib import Path\nIGNORE_LABEL = 255\nSTATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':\n {'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}\n\n\ndef seg_to_rgb(seg, colors):\n im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()\n cls = torch.unique(seg)\n for cl in cls:\n color = colors[int(cl)]\n if len(color.shape) > 1:\n color = color[0]\n im[seg == cl] = color\n return im\n\n\ndef dataset_cat_description(path, cmap=None):\n desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)\n colors = {}\n names = []\n for i, cat in enumerate(desc):\n names.append(cat['name'])\n if 'color' in cat:\n colors[cat['id']] = torch.tensor(cat['color']).float() / 255\n else:\n colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()\n colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()\n return names, colors\n\n\ndef rgb_normalize(x, stats):\n \"\"\"\n x : C x *\n x \\\\in [0, 1]\n \"\"\"\n return F.normalize(x, stats['mean'], stats['std'])\n\n\ndef rgb_denormalize(x, stats):\n \"\"\"\n x : N x C x *\n x \\\\in [-1, 1]\n \"\"\"\n mean = torch.tensor(stats['mean'])\n std = torch.tensor(stats['std'])\n for i in range(3):\n x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]\n return x\n",
"step-5": "import torch\nimport torchvision.transforms.functional as F\nimport numpy as np\nimport yaml\nfrom pathlib import Path\n\nIGNORE_LABEL = 255\nSTATS = {\n \"vit\": {\"mean\": (0.5, 0.5, 0.5), \"std\": (0.5, 0.5, 0.5)},\n \"deit\": {\"mean\": (0.485, 0.456, 0.406), \"std\": (0.229, 0.224, 0.225)},\n}\n\n\ndef seg_to_rgb(seg, colors):\n im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()\n cls = torch.unique(seg)\n for cl in cls:\n color = colors[int(cl)]\n if len(color.shape) > 1:\n color = color[0]\n im[seg == cl] = color\n return im\n\n\ndef dataset_cat_description(path, cmap=None):\n desc = yaml.load(open(path, \"r\"), Loader=yaml.FullLoader)\n colors = {}\n names = []\n for i, cat in enumerate(desc):\n names.append(cat[\"name\"])\n if \"color\" in cat:\n colors[cat[\"id\"]] = torch.tensor(cat[\"color\"]).float() / 255\n else:\n colors[cat[\"id\"]] = torch.tensor(cmap[cat[\"id\"]]).float()\n colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()\n return names, colors\n\n\ndef rgb_normalize(x, stats):\n \"\"\"\n x : C x *\n x \\in [0, 1]\n \"\"\"\n return F.normalize(x, stats[\"mean\"], stats[\"std\"])\n\n\ndef rgb_denormalize(x, stats):\n \"\"\"\n x : N x C x *\n x \\in [-1, 1]\n \"\"\"\n mean = torch.tensor(stats[\"mean\"])\n std = torch.tensor(stats[\"std\"])\n for i in range(3):\n x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]\n return x\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'module_spec': {'module_name': 'Spec1'}}
<|reserved_special_token_1|>
{
"module_spec": {
"module_name": "Spec1"
}
}
|
flexible
|
{
"blob_id": "1cfb0690ebe1d7c6ab93fa6a4bc959b90b991bc8",
"index": 7016,
"step-1": "<mask token>\n",
"step-2": "{'module_spec': {'module_name': 'Spec1'}}\n",
"step-3": "{\n \"module_spec\": {\n \"module_name\": \"Spec1\"\n }\n}\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, msg_count):
msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}
sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
msg_count = 50
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
for i in range(0, msg_count):
msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}
sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))
<|reserved_special_token_1|>
from kafka import KafkaProducer
import json
msg_count = 50
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
for i in range(0, msg_count):
msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}
sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))
|
flexible
|
{
"blob_id": "d763485e417900044d7ce3a63ef7ec2def115f05",
"index": 7263,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, msg_count):\n msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}\n sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))\n",
"step-3": "<mask token>\nmsg_count = 50\nproducer = KafkaProducer(bootstrap_servers=['localhost:9092'])\nfor i in range(0, msg_count):\n msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}\n sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))\n",
"step-4": "from kafka import KafkaProducer\nimport json\nmsg_count = 50\nproducer = KafkaProducer(bootstrap_servers=['localhost:9092'])\nfor i in range(0, msg_count):\n msg = {'id': i + 20, 'payload': 'Here is test message {}'.format(i + 20)}\n sent = producer.send('test-topic2', bytes(json.dumps(msg), 'utf-8'))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
def get_os_env_value(key):
return os.getenv(key)
def get_mysql_uri(user, password, host, database):
return f'mysql+pymysql://{user}:{password}@{host}/{database}'
MASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')
MASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')
MASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')
MASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')
# SQLALCHEMY_POOL_RECYCLE = 60 * 10
# SQLALCHEMY_POOL_TIMEOUT = 60 * 20
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,
MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)
SQLALCHEMY_ENGINE_OPTIONS = {
"pool_pre_ping": True
}
|
normal
|
{
"blob_id": "8247b045a5aed4d0f3db6bc2c0edd985f2c4ba30",
"index": 5305,
"step-1": "<mask token>\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n",
"step-4": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value(\n 'MASTER_MYSQL_DATABASE_DB_CASAONE')\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER,\n MASTER_MYSQL_DATABASE_PASSWORD, MASTER_MYSQL_DATABASE_HOST,\n MASTER_MYSQL_DATABASE_DB_CASAONE)\nSQLALCHEMY_ENGINE_OPTIONS = {'pool_pre_ping': True}\n",
"step-5": "import os\n\n\ndef get_os_env_value(key):\n return os.getenv(key)\n\n\ndef get_mysql_uri(user, password, host, database):\n return f'mysql+pymysql://{user}:{password}@{host}/{database}'\n\n\nMASTER_MYSQL_DATABASE_USER = get_os_env_value('MASTER_MYSQL_DATABASE_USER')\nMASTER_MYSQL_DATABASE_PASSWORD = get_os_env_value('MASTER_MYSQL_DATABASE_PASSWORD')\nMASTER_MYSQL_DATABASE_HOST = get_os_env_value('MASTER_MYSQL_DATABASE_HOST')\nMASTER_MYSQL_DATABASE_DB_CASAONE = get_os_env_value('MASTER_MYSQL_DATABASE_DB_CASAONE')\n\n# SQLALCHEMY_POOL_RECYCLE = 60 * 10\n# SQLALCHEMY_POOL_TIMEOUT = 60 * 20\nSQLALCHEMY_TRACK_MODIFICATIONS = True\nSQLALCHEMY_ECHO = True\n\nSQLALCHEMY_DATABASE_URI = get_mysql_uri(MASTER_MYSQL_DATABASE_USER, MASTER_MYSQL_DATABASE_PASSWORD,\n MASTER_MYSQL_DATABASE_HOST, MASTER_MYSQL_DATABASE_DB_CASAONE)\n\nSQLALCHEMY_ENGINE_OPTIONS = {\n \"pool_pre_ping\": True\n}\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PasswordChangeForm(forms.Form):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PasswordChangeForm(forms.Form):
password = forms.CharField(min_length=8, label='New Password*', strip=
False, widget=forms.PasswordInput(attrs={'autocomplete':
'current-password', 'class': 'form-control'}))
<|reserved_special_token_1|>
from django import forms
class PasswordChangeForm(forms.Form):
password = forms.CharField(min_length=8, label='New Password*', strip=
False, widget=forms.PasswordInput(attrs={'autocomplete':
'current-password', 'class': 'form-control'}))
<|reserved_special_token_1|>
from django import forms
class PasswordChangeForm(forms.Form):
password = forms.CharField(min_length=8,
label="New Password*",
strip=False,
widget=forms.PasswordInput(
attrs={'autocomplete': 'current-password', 'class': 'form-control'}),
)
|
flexible
|
{
"blob_id": "85fff1f6e1f69dd0e2e9b5acc90db31d27329c7c",
"index": 3352,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PasswordChangeForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8, label='New Password*', strip=\n False, widget=forms.PasswordInput(attrs={'autocomplete':\n 'current-password', 'class': 'form-control'}))\n",
"step-4": "from django import forms\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8, label='New Password*', strip=\n False, widget=forms.PasswordInput(attrs={'autocomplete':\n 'current-password', 'class': 'form-control'}))\n",
"step-5": "from django import forms\n\n\nclass PasswordChangeForm(forms.Form):\n password = forms.CharField(min_length=8,\n label=\"New Password*\",\n strip=False,\n widget=forms.PasswordInput(\n attrs={'autocomplete': 'current-password', 'class': 'form-control'}),\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class StaticTemplateList(TemplateList):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StaticTemplateList(TemplateList):
def __init__(self, viewMode=None):
TemplateList.__init__(self, viewMode)
<|reserved_special_token_0|>
def getFeatureName(self):
return 'static'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StaticTemplateList(TemplateList):
def __init__(self, viewMode=None):
TemplateList.__init__(self, viewMode)
def getList(self):
return [['graphical', 'interface.html'], ['ada', 'interface.html']]
def getFeatureName(self):
return 'static'
<|reserved_special_token_1|>
from BaseClasses.TemplateList import *
class StaticTemplateList(TemplateList):
def __init__(self, viewMode=None):
TemplateList.__init__(self, viewMode)
def getList(self):
return [['graphical', 'interface.html'], ['ada', 'interface.html']]
def getFeatureName(self):
return 'static'
<|reserved_special_token_1|>
#########################################################
# Author: Todd A. Reisel
# Date: 2/24/2003
# Class: StaticTemplateList
#########################################################
from BaseClasses.TemplateList import *;
class StaticTemplateList(TemplateList):
    """Template list for the "static" feature.

    Exposes the same ``interface.html`` template for both the graphical
    and the ADA (accessible) view modes.
    """

    def __init__(self, viewMode=None):
        """Initialise with an optional view mode, delegating to TemplateList."""
        # Explicit base-class call kept for compatibility with the existing
        # TemplateList initialisation style used across the project.
        TemplateList.__init__(self, viewMode)

    def getList(self):
        """Return ``[view_mode, template]`` pairs served by this feature."""
        return [["graphical", "interface.html"], ["ada", "interface.html"]]

    def getFeatureName(self):
        """Return the feature identifier used to look up this template list."""
        return "static"
|
flexible
|
{
"blob_id": "7de3c0ab2e7c8ac00d37f1dfb5948027cfa7806c",
"index": 5084,
"step-1": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n <mask token>\n\n def getFeatureName(self):\n return 'static'\n",
"step-3": "<mask token>\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n\n def getList(self):\n return [['graphical', 'interface.html'], ['ada', 'interface.html']]\n\n def getFeatureName(self):\n return 'static'\n",
"step-4": "from BaseClasses.TemplateList import *\n\n\nclass StaticTemplateList(TemplateList):\n\n def __init__(self, viewMode=None):\n TemplateList.__init__(self, viewMode)\n\n def getList(self):\n return [['graphical', 'interface.html'], ['ada', 'interface.html']]\n\n def getFeatureName(self):\n return 'static'\n",
"step-5": "#########################################################\n# Author: Todd A. Reisel\n# Date: 2/24/2003\n# Class: StaticTemplateList\n#########################################################\n\nfrom BaseClasses.TemplateList import *;\n\nclass StaticTemplateList(TemplateList):\n def __init__(self, viewMode = None):\n TemplateList.__init__(self, viewMode);\n \n def getList(self):\n return [ [\"graphical\", \"interface.html\"], [\"ada\", \"interface.html\"] ];\n \n def getFeatureName(self):\n return \"static\";\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gamblerProblem():
"""
Description:
This function Simulates a gambler who start with stake and place fair 1 bets until
he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of
times he/she wins and the number of bets he/she makes. Run the experiment N
times, averages the results, print the results.
"""
stake = int(input('Enter The The Stake Amount:'))
goal = int(input('Enter The Amount You Want To Win:'))
bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))
no_of_times_won = 0
no_of_time_lost = 0
no_of_bets_made = 0
while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:
no_of_bets_made += 1
gambler_choice = random.randint(0, 1)
if gambler_choice == 1:
no_of_times_won += 1
stake = stake + 1
else:
no_of_time_lost += 1
stake = stake - 1
percentage_win = no_of_times_won / bet_made * 100
print('Number Of Times Won', no_of_times_won)
print('Percentage Of Win', percentage_win)
print('Percentage Of Loss', 100 - percentage_win)
print('Number Of Bets Made', no_of_bets_made)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gamblerProblem():
"""
Description:
This function Simulates a gambler who start with stake and place fair 1 bets until
he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of
times he/she wins and the number of bets he/she makes. Run the experiment N
times, averages the results, print the results.
"""
stake = int(input('Enter The The Stake Amount:'))
goal = int(input('Enter The Amount You Want To Win:'))
bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))
no_of_times_won = 0
no_of_time_lost = 0
no_of_bets_made = 0
while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:
no_of_bets_made += 1
gambler_choice = random.randint(0, 1)
if gambler_choice == 1:
no_of_times_won += 1
stake = stake + 1
else:
no_of_time_lost += 1
stake = stake - 1
percentage_win = no_of_times_won / bet_made * 100
print('Number Of Times Won', no_of_times_won)
print('Percentage Of Win', percentage_win)
print('Percentage Of Loss', 100 - percentage_win)
print('Number Of Bets Made', no_of_bets_made)
if __name__ == '__main__':
gamblerProblem()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
def gamblerProblem():
"""
Description:
This function Simulates a gambler who start with stake and place fair 1 bets until
he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of
times he/she wins and the number of bets he/she makes. Run the experiment N
times, averages the results, print the results.
"""
stake = int(input('Enter The The Stake Amount:'))
goal = int(input('Enter The Amount You Want To Win:'))
bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))
no_of_times_won = 0
no_of_time_lost = 0
no_of_bets_made = 0
while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:
no_of_bets_made += 1
gambler_choice = random.randint(0, 1)
if gambler_choice == 1:
no_of_times_won += 1
stake = stake + 1
else:
no_of_time_lost += 1
stake = stake - 1
percentage_win = no_of_times_won / bet_made * 100
print('Number Of Times Won', no_of_times_won)
print('Percentage Of Win', percentage_win)
print('Percentage Of Loss', 100 - percentage_win)
print('Number Of Bets Made', no_of_bets_made)
if __name__ == '__main__':
gamblerProblem()
<|reserved_special_token_1|>
'''
* @Author: Mohammad Fatha.
* @Date: 2021-09-17 19:50
* @Last Modified by: Mohammad Fatha
* @Last Modified time: 2021-09-17 19:55
* @Title: Gambler Game
'''
import random
def gamblerProblem(stake=None, goal=None, bet_made=None):
    """
    Simulate a gambler who starts with ``stake`` and places fair $1 bets
    until he/she goes broke (has no money), reaches ``goal``, or has made
    ``bet_made`` bets.  Tracks the number of wins and the number of bets
    made, and prints the results.

    :param int stake: Starting bankroll; prompted interactively when None.
    :param int goal: Target amount; prompted interactively when None.
    :param int bet_made: Maximum number of bets; prompted interactively when None.
    :return: Tuple ``(no_of_times_won, no_of_bets_made)``.
    """
    # Prompt only for values the caller did not supply, which keeps the
    # original interactive behaviour when called with no arguments.
    if stake is None:
        stake = int(input("Enter The Stake Amount:"))
    if goal is None:
        goal = int(input("Enter The Amount You Want To Win:"))
    if bet_made is None:
        bet_made = int(input("Enter The Number Of Bets You Want To Make:"))

    no_of_times_won = 0
    no_of_bets_made = 0

    # Stop as soon as the gambler is broke (stake == 0) or has reached the
    # goal (stake == goal).  The original `>= 0` / `<= goal` bounds played
    # one extra bet in both cases, contradicting the documented rules.
    while 0 < stake < goal and no_of_bets_made < bet_made:
        no_of_bets_made += 1
        # random.randint(0, 1) is a fair coin; 1 means the gambler wins $1.
        if random.randint(0, 1) == 1:
            no_of_times_won += 1
            stake += 1
        else:
            stake -= 1

    # Guard against ZeroDivisionError when zero bets were requested.
    percentage_win = (no_of_times_won / bet_made) * 100 if bet_made else 0.0
    print("Number Of Times Won", no_of_times_won)
    print("Percentage Of Win", percentage_win)
    print("Percentage Of Loss", 100 - percentage_win)
    print("Number Of Bets Made", no_of_bets_made)
    return no_of_times_won, no_of_bets_made
if __name__ == '__main__':
    # Run the interactive simulation when executed as a script.
    gamblerProblem()
|
flexible
|
{
"blob_id": "68904be892968d4a1d82a59a31b95a8133a30832",
"index": 8790,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n",
"step-4": "<mask token>\nimport random\n\n\ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake = int(input('Enter The The Stake Amount:'))\n goal = int(input('Enter The Amount You Want To Win:'))\n bet_made = int(input('Enter The The Number Of Bets You Want To Make:'))\n no_of_times_won = 0\n no_of_time_lost = 0\n no_of_bets_made = 0\n while stake >= 0 and stake <= goal and no_of_bets_made < bet_made:\n no_of_bets_made += 1\n gambler_choice = random.randint(0, 1)\n if gambler_choice == 1:\n no_of_times_won += 1\n stake = stake + 1\n else:\n no_of_time_lost += 1\n stake = stake - 1\n percentage_win = no_of_times_won / bet_made * 100\n print('Number Of Times Won', no_of_times_won)\n print('Percentage Of Win', percentage_win)\n print('Percentage Of Loss', 100 - percentage_win)\n print('Number Of Bets Made', no_of_bets_made)\n\n\nif __name__ == '__main__':\n gamblerProblem()\n",
"step-5": "'''\n* @Author: Mohammad Fatha.\n* @Date: 2021-09-17 19:50 \n* @Last Modified by: Mohammad Fatha\n* @Last Modified time: 2021-09-17 19:55\n* @Title: Gambler Game\n'''\nimport random\n \ndef gamblerProblem():\n \"\"\"\n Description:\n This function Simulates a gambler who start with stake and place fair 1 bets until\n he/she goes broke (i.e. has no money) or reach $goal. Keeps track of the number of\n times he/she wins and the number of bets he/she makes. Run the experiment N\n times, averages the results, print the results.\n \"\"\"\n stake=int(input(\"Enter The The Stake Amount:\"))\n goal=int(input(\"Enter The Amount You Want To Win:\"))\n bet_made=int(input(\"Enter The The Number Of Bets You Want To Make:\"))\n no_of_times_won=0\n no_of_time_lost=0\n no_of_bets_made=0\n\n while(stake >= 0 and stake <= goal and no_of_bets_made < bet_made):\n no_of_bets_made+=1\n gambler_choice=random.randint(0, 1) #generates a random number 0 or 1\n \n if gambler_choice==1: #if the random number generated is 0\n no_of_times_won+=1\n stake=stake+1 \n else:\n no_of_time_lost+=1\n stake=stake-1\n\n percentage_win = (no_of_times_won/bet_made)*100\n print(\"Number Of Times Won\",no_of_times_won)\n print(\"Percentage Of Win\", percentage_win) \n print(\"Percentage Of Loss\", 100-percentage_win)\n print(\"Number Of Bets Made\", no_of_bets_made) \n \n\nif __name__ == '__main__':\n gamblerProblem() ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging
from django.contrib.auth import get_user_model
from django.db import models
from rest_framework import serializers
from rest_framework.test import APITestCase
from ..autodocs.docs import ApiDocumentation
from .utils import Deferred
log = logging.getLogger(__name__)
def get_serializer(endpoint, method_name, dict_key='in'):
    """Return the serializer class registered for an endpoint method, if any.

    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: HTTP method name.
    :param str dict_key: Key of the serializer dict, either 'in' or 'out'.

    :return: The serializer class, or None when none is registered.
    """
    # A PATCH request without a serializer of its own falls back to PUT's.
    candidates = [method_name] + (['PUT'] if method_name == 'PATCH' else [])
    for candidate in candidates:
        if candidate not in endpoint.serializer_classes:
            continue
        mapping = endpoint.serializer_classes[candidate]
        if isinstance(mapping, dict) and dict_key in mapping:
            return mapping[dict_key]
def resolve_deferred(value):
    """Replace any `Deferred` object with the pk of a `Deferred.model` instance.

    Recurses through dicts and lists so nested `Deferred` markers are
    resolved as well; any other value is returned unchanged.

    :param any value: Arbitrary object.
    """
    if isinstance(value, Deferred):
        return model_instance(value.model, value.force_create).pk
    if isinstance(value, dict):
        return {resolve_deferred(key): resolve_deferred(item)
                for key, item in value.items()}
    if isinstance(value, list):
        return [resolve_deferred(item) for item in value]
    return value
def model_instance(model, force_create=False):
    """Create (or fetch an existing) instance of the given model.

    :param any model: The model class.
    :param bool force_create: Always create a new object instead of reusing one.

    :return: A model instance.
    :rtype: models.Model.
    """
    # Reuse an existing object unless a fresh one was explicitly requested.
    if not force_create and model.objects.all().count() > 0:
        return model.objects.first()

    field_values = {}
    for model_field in model._meta.get_fields():
        # Only populate user-defined, required fields.
        if model_field.auto_created or model_field.blank:
            continue
        if hasattr(model_field, 'choices') and len(model_field.choices) > 0:
            # Pick the first declared choice value.
            field_values[model_field.name] = model_field.choices[0][0]
        elif isinstance(model_field, models.IntegerField):
            field_values[model_field.name] = 1
        elif isinstance(model_field, models.ForeignKey):
            # Recursively ensure the related object exists.
            field_values[model_field.name] = model_instance(model_field.related_model)
        elif isinstance(model_field, models.CharField):
            field_values[model_field.name] = 'test'
    return model.objects.create(**field_values)
class AutoTestCase(APITestCase):
    """
    Test case for automatic testing of REST endpoints.

    Test methods are attached dynamically by the module-level code below:
    one ``test_*`` method per (endpoint, HTTP method, request type) tuple,
    all dispatching to :meth:`base_test_method` via ``REQUESTS_DATA``.
    """

    @classmethod
    def setUpClass(cls):
        """
        Create the user shared by all tests; it is picked up through
        `settings.AUTH_USER_PK`.
        """
        super(AutoTestCase, cls).setUpClass()
        model_instance(get_user_model())

    def setUp(self):
        """
        Prepare the test request: look up this test's data in REQUESTS_DATA
        and create / fetch the objects whose primary keys appear in the URL.
        """
        self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)
        path = self.endpoint.path
        if '<pk>' in path:
            obj = model_instance(self.endpoint.callback.cls.queryset.model)
            path = path.replace('<pk>', str(obj.pk))
        self.path = path
        # Let the view class customise test setup via an optional `test_setup` hook.
        if hasattr(self.endpoint.callback.cls, 'test_setup'):
            getattr(self.endpoint.callback.cls, 'test_setup')(self)

    def base_test_method(self):
        """
        The method that checks the endpoint obtained from the iterator.
        """
        request_method = getattr(self.client, self.method.lower())
        if self.serializer:
            if self.request_type == 'all':
                # Request with the full input payload.
                data = self.prepare_request_data(self.serializer)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'only_required':
                # Request with required fields only.
                data = self.prepare_request_data(self.serializer, only_required=True)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'without_required':
                # Request missing one of the required fields; must be rejected.
                data = self.prepare_request_data(self.serializer, only_required=True)
                data.popitem()
                response = self.send_request(request_method, self.path, data, 'json')
                self.assertTrue(400 <= response.status_code < 500)
        else:
            # Request with no input payload.
            response = self.send_request(request_method, self.path)
            self.check_response_is_valid(response)

    def prepare_request_data(self, field, only_required=False):
        """
        Build the request payload.

        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Field or serializer object.
        :param bool only_required: Whether to include only required fields.

        :return: Data for the client to send.
        :rtype: list, dict.
        """
        # A serializer class rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.prepare_request_data(field())
        # A test value was provided via `test_helper_factory`.
        elif hasattr(field, 'test_helper_value'):
            return resolve_deferred(field.test_helper_value)
        # A list serializer.
        elif isinstance(field, serializers.ListSerializer):
            return [self.prepare_request_data(field.child)]
        # A serializer instance.
        elif isinstance(field, serializers.BaseSerializer):
            return {k: self.prepare_request_data(v) for k,v in field.get_fields().items() \
                    if (not only_required) or (only_required and v.required)}
        # A plain field.
        elif isinstance(field, serializers.ChoiceField):
            for val, verbose in field.choices.items():
                return val
        elif isinstance(field, serializers.PrimaryKeyRelatedField):
            return model_instance(field.queryset.model).pk
        elif isinstance(field, serializers.CharField):
            return 'test'
        elif isinstance(field, serializers.IntegerField):
            return 1

    def send_request(self, request_method, path, data=None, format_type=None):
        """
        Send the request.

        :param method request_method: Client method.
        :param str path: URL.
        :param dict data: Request payload.
        :param str format_type: Payload format.

        :return: Response.
        :rtype: `rest_framework.response.Response`.
        """
        kwargs = dict(data=data, format=format_type)
        # Let the view class tweak request kwargs via an optional `test_prepare_request` hook.
        if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
            kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)
        self.data = data
        print_strings = ['Отправка {} на {}'.format(request_method.__name__, path)]
        if data is not None:
            print_strings.append('с данными')
        log.debug(' '.join(print_strings + ['\n']))
        return request_method(path, **kwargs)

    def check_response_is_valid(self, response):
        """
        Check the response for success and correctness.

        :param `rest_framework.response.Response` response: Response.
        """
        self.assertTrue(200 <= response.status_code < 400)
        response_serializer = get_serializer(self.endpoint, self.method, 'out')
        if response_serializer:
            self.check_response_data(response.data, response_serializer)

    def check_response_data(self, data, field):
        """
        Validate the data in the response.

        :param any data: The `Response.data` dict or one of its values.
        :param any field: Serializer or field to compare the response data against.
        """
        # @TODO: Validation against serializer data is currently impossible:
        # something happens to the QuerySet that makes serializer.data raise RuntimeError.
        '''
        if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \
           'out' in self.endpoint.serializer_classes[method_name]:
            serializer = self.endpoint.serializer_classes[method_name]['out'](
                self.endpoint.callback.cls.queryset, many=True)
            self.assertEqual(response.data, serializer.data)
        '''
        # A serializer class rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.check_response_data(data, field())

        '''
        if 'results' in data and 'count' in data:
            for item in data['results']:
                self.check_response_data(item, out_fields)

        else:
            for field_name, value in data.items():
                try:
                    field_data = fields[field_name]
                except:
                    import pdb; pdb.set_trace()
                # Проверка наличия филда среди ожидаемых в ответе
                self.assertTrue(field_name in available_fields)
                available_fields.remove(field_name)

                if field_name in required_fields:
                    required_fields.remove(field_name)

                if field_data['sub_fields']:
                    if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
                        for key, item in data[field_name].items():
                            self.check_response_data(item, field_data['sub_fields'])
                    else:
                        self.check_response_data(data[field_name], field_data['sub_fields'])

                else:
                    field_instance = field_data['field_instance']

                    # Проверка значения если филд обязателен или имеется значение в ответе
                    if field_data['required'] or value is not None:
                        # Проверка типа филда
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))

                        # Проверка коррекности значения (иначе возникнет исключение)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)

        # Проверяем чтобы все обязательные поля в ответе были
        self.assertEqual(len(required_fields), 0)
        '''
# Collect every endpoint exposed by the API documentation.
ENDPOINTS = ApiDocumentation().get_endpoints()
ENDPOINTS = [ep for ep in ENDPOINTS]

# Build the list of requests to exercise: one entry per
# (endpoint, method, serializer, request_type) combination.
REQUESTS_LIST = []
for endpoint in ENDPOINTS:
    for method in endpoint.allowed_methods:
        serializer = get_serializer(endpoint, method)
        if serializer:
            # @TODO: Finish support for requests missing required data (without_required).
            # for request_type in ('all', 'only_required', 'without_required'):
            for request_type in ('all', 'only_required'):
                REQUESTS_LIST.append((endpoint, method, serializer, request_type))
        else:
            REQUESTS_LIST.append((endpoint, method, serializer, None))

REQUESTS_DATA = {}
# Attach a test method to AutoTestCase for each collected request; setUp()
# looks the tuple back up in REQUESTS_DATA by the generated method name.
for endpoint, method, serializer, request_type in REQUESTS_LIST:
    method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)
    REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)
    setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)
|
normal
|
{
"blob_id": "04822e735c9c27f0e0fcc9727bcc38d2da84dee6",
"index": 7831,
"step-1": "<mask token>\n\n\nclass AutoTestCase(APITestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n <mask token>\n\n def send_request(self, request_method, path, data=None, 
format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\n<mask token>\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n 
self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n 
v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = 
self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return 
model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def 
prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n 
def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is 
not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n",
"step-4": "<mask token>\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n 
data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n 
self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + 
['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется 
значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\nENDPOINTS = [ep for ep in ENDPOINTS]\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer,\n request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\nREQUESTS_DATA = {}\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method,\n request_type)\n REQUESTS_DATA[method_name] = endpoint, method, serializer, request_type\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n",
"step-5": "import logging\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom rest_framework import serializers\nfrom rest_framework.test import APITestCase\n\nfrom ..autodocs.docs import ApiDocumentation\n\nfrom .utils import Deferred\n\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n # Если тестируем PATCH метод и при этом для него нет сериалайзера, используем сериалайзер от PUT.\n if method_name == 'PATCH':\n methods.append('PUT')\n\n for method in methods:\n if method in endpoint.serializer_classes and \\\n isinstance(endpoint.serializer_classes[method], dict) and \\\n dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k,v in value.items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n\n data = {}\n for field in model._meta.get_fields():\n if not 
field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)\n\n path = self.endpoint.path\n\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n\n self.path = path\n\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n\n if self.serializer:\n if self.request_type == 'all':\n # Запрос со всеми данными на входе.\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'only_required':\n # Запрос только с обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n response = 
self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'without_required':\n # Запрос не со всеми обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path, data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n\n else:\n # Запрос без данных на входе.\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n\n # Либо имеется тестовое значение установленное через `test_helper_factory`.\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n\n # Либо это список.\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n\n # Либо это экземпляр сериалайзера.\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k,v in field.get_fields().items() \\\n if (not only_required) or (only_required and v.required)}\n\n # Либо это поле.\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n\n elif isinstance(field, serializers.CharField):\n return 'test'\n\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def 
send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)\n\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__, path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n # @TODO: Проверка с помощью данных сериалайзера на данный момент не возможна\n # т.к. 
что-то происходит с QuerySet'ом из-за чего serializer.data вызывает RuntimeError.\n '''\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \\\n 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n '''\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n\n '''\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n '''\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\n\nENDPOINTS = [ep for ep in 
ENDPOINTS]\n\n# Собираем список запросов.\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n # @TODO: Доработать тестирование без обязательных данных в запросе (without_required).\n # for request_type in ('all', 'only_required', 'without_required'):\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer, request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\n\nREQUESTS_DATA = {}\n# Добавляем для них тестовые методы.\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)\n REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n",
"step-ids": [
6,
11,
12,
14,
16
]
}
|
[
6,
11,
12,
14,
16
] |
from collections import Counter

import nltk

import database
def _insert_word_counts(words, tag, location):
    """Insert one row per distinct word in *words* into the ``words`` table.

    ``Counter`` preserves first-insertion order (Python 3.7+), so rows are
    emitted in first-occurrence order exactly like the original index-based
    tally loops.

    :param list words: Words to tally (may contain duplicates).
    :param str tag: POS tag stored in the row (e.g. ``'NN'``).
    :param str location: Location label stored in the row.
    """
    for word, count in Counter(words).items():
        database.cursor.execute(
            "INSERT INTO words VALUES (?, ?, ?, ?)",
            (word, tag, location, count))


def pop(i):  # populate the words table
    """Populate the ``words`` table from ``words.txt``.

    Sentence-tokenizes the file, POS-tags every token with nltk, appends
    each handled word (lower-cased) to the matching shared list on the
    ``database`` module, then inserts one ``(word, tag, location, count)``
    row per distinct word.  The location label is ``locations[i]``.

    NOTE(review): as in the original code, the first token in a sentence
    whose tag is not handled (including ``,`` and ``.``) aborts processing
    of the rest of that sentence — confirm this is intended.

    NOTE(review): the ``database`` word lists accumulate across calls, so a
    second call re-counts words from earlier calls — confirm ``pop`` is
    only invoked once per location.

    :param int i: Index into the hard-coded ``locations`` list choosing
        which location the inserted rows are attributed to.
    """
    loc = i
    locations = [
        "Castle", "Beach", "Beach", "Ghost Town", "Ghost Town",
        "Haunted House", "Jungle", "Carnival", "Ghost Town", "Highway",
        "Castle", "Pyramid", "Beach", "Beach", "Carnival", "Highway",
        "Castle", "Jungle",
    ]

    # The original leaked the file handle; a with-statement closes it.
    with open('words.txt') as words_file:
        text = words_file.read()
    sentences = nltk.sent_tokenize(text)

    # POS tag -> shared word list on the database module.  All handled
    # words are stored lower-cased (the original forgot to lower-case
    # adverbs — fixed here).
    buckets = {
        'NN': database.nouns,
        'NNS': database.nounsplural,
        'NNP': database.propernounS,
        'NNPS': database.propernounP,
        'JJ': database.adjective,
        'VB': database.verbs,
        'VBG': database.verbs,
        'VBN': database.verbs,
        'VBD': database.verbpast,
        'VBZ': database.verb3person,
        'VBP': database.verb3person,
        'RB': database.adverb,
        'RBR': database.adverb,
        'RBS': database.adverb,
    }
    for sentence in sentences:
        for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
            bucket = buckets.get(pos)
            if bucket is not None:
                bucket.append(word.lower())
            else:
                # Unhandled tag: record the token, then (as in the
                # original) abandon the rest of this sentence.
                if word in (',', '.'):
                    database.useless.append(word)
                else:
                    database.unUsedWords.append(word.lower())
                break

    # One distinct-word/count insert pass per category; replaces eleven
    # copy-pasted quadratic counting loops.
    location = locations[loc]
    for words, tag in (
        (database.nouns, 'NN'),
        (database.nounsplural, 'NNS'),
        (database.propernounS, 'NNP'),
        (database.propernounP, 'NNPS'),
        (database.adjective, 'JJ'),
        (database.verbs, 'VB'),
        (database.verbpast, 'VBD'),
        (database.verb3person, 'VBZ'),
        (database.adverb, 'RB'),
        (database.useless, 'PU'),
        (database.unUsedWords, 'US'),
    ):
        _insert_word_counts(words, tag, location)
def pop2():
    """Populate the monsters, characters, and location tables.

    Inserts the game's static reference data through the module-level
    ``database.cursor``.  Each table is loaded with one parameterized
    ``executemany`` call so the rows read as a data table instead of a
    wall of hand-written INSERT statements.

    Fix: the original inserted ('Ghost TOwn', 'Foggy') with a capital
    'O'; corrected to 'Ghost Town' so the location name matches the
    value used by the monsters rows (a mismatched name would never join
    against them).
    """
    # (monster, location, culprit, evidence)
    monsters = [
        ('Knight', 'Castle', 'Old Man Jenkins', 'Picture'),
        ('Vampire', 'Castle', 'Andrew the Tour', 'Vampire Make Up and fake blood'),
        ('Shadow', 'Castle', 'Frank the Janitor', 'Black paint'),
        ('Ghost Pirate', 'Beach', 'Bill the Lifeguard', 'Pirate Costume'),
        ('Seaweed Monster', 'Beach', 'Old Fisherman Joe', 'Seaweed'),
        ('Shark', 'Beach', 'The Mayor', 'Shark fins'),
        # NOTE(review): trailing space in 'Jerry the Businessman ' and
        # 'Museum Curator Petterson ' kept byte-for-byte — other code may
        # already store/compare these names as written.
        ('Cowboy Ghost', 'Ghost Town', 'Jerry the Businessman ', 'Cowboy hat'),
        ('Miner Ghost', 'Ghost Town', 'Gold Hunter Phil', 'Dusty shoes'),
        ('Headless Horse Man', 'Ghost Town', 'Envirnmentalist Paddy',
         'Drawing of rig to appear headless'),
        ('Francinstein', 'Haunted House', 'Sir Godfree', 'Green paint'),
        ('Zombie', 'Haunted House', 'The Waiter',
         'Zombie Make Up and fake boy parts'),
        ('Ghost', 'Haunted House', 'Jimmy', 'Glow in the dark paint on cloths'),
        ('Ape Man', 'Jungle', 'Explorer Fred', 'Ape Costume'),
        ('Animal Ghosts', 'Jungle', 'Environmentalist Jennie', 'Scratch Marks'),
        ('Pterodactyl', 'Jungle', 'Tour Guide Bill', 'Book on flight'),
        ('Clown Ghost', 'Carnival', 'Ring Master', 'Old Clown Costumes'),
        ('Zombie', 'Carnival', 'Blind Knife Thrower',
         'Eye tests saying he is not blind'),
        ('Animals', 'Carnival', 'Worlds Strongest Man', 'Scratch marks'),
        ('Ghost Car', 'Highway', 'Old Town Mayor', 'Car ownership documents'),
        ('White Lady Ghost', 'Highway', 'Miss Anderson', 'White Dress'),
        ('Aliens', 'Highway', 'Conspiracy Tom', 'Fake Space ship blueprint'),
        ('Mummy', 'Pyramid', 'Museum Curator Petterson ', 'Bandages'),
        ('Sand Man', 'Pyramid', 'Ramesh the Tour Guide', 'Sand'),
        ('Sphynx', 'Pyramid', 'Tour Guide Bob', 'scratch marks'),
    ]
    database.cursor.executemany(
        "INSERT INTO monsters VALUES (?, ?, ?, ?)", monsters)

    # (character, catch phrase)
    characters = [
        ('Scooby Doo', 'Scooby Dooby Doo'),
        ('Shaggy', 'Zoinks!'),
        ('Fred', 'Lets Split up and look for clues'),
        ('Velma', 'My glasses. I cant find my glasses'),
        ('Daphne', 'Do you want a Scooby Snack'),
    ]
    database.cursor.executemany(
        "INSERT INTO characters VALUES (?, ?)", characters)

    # (location, weather) — location names must match the ones used in
    # the monsters rows above.
    locations = [
        ('Castle', 'Stormy'),
        ('Castle', 'Raining'),
        ('Castle', 'Misty'),
        ('Castle', 'Dark'),
        ('Beach', 'Sunny'),
        ('Beach', 'Misty'),
        ('Ghost Town', 'Cloudy'),
        ('Ghost Town', 'Foggy'),  # was 'Ghost TOwn' (typo) in the original
        ('Haunted House', 'Stormy'),
        ('Haunted House', 'Misty'),
        ('Jungle', 'Sunny'),
        ('Jungle', 'Raining'),
        ('Carnival', 'Dark'),
        ('Carnival', 'Cloudy'),
        ('Carnival', 'Overcast'),
        ('Highway', 'Overcast'),
        ('Highway', 'Sunny'),
        ('Pyramid', 'Overcast'),
        ('Pyramid', 'Sunny'),
        ('Pyramid', 'Raining'),
    ]
    database.cursor.executemany(
        "INSERT INTO location VALUES (?, ?)", locations)
|
normal
|
{
"blob_id": "e7ac5c1010330aec81ce505fd7f52ccdeddb76de",
"index": 8923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pop(i):\n loc = i\n sentencesTrial = []\n File = open('words.txt')\n lines = File.read()\n sentences = nltk.sent_tokenize(lines)\n locations = ['Castle', 'Beach', 'Beach', 'Ghost Town', 'Ghost Town',\n 'Haunted House', 'Jungle', 'Carnival', 'Ghost Town', 'Highway',\n 'Castle', 'Pyramid', 'Beach', 'Beach', 'Carnival', 'Highway',\n 'Castle', 'Jungle']\n for sentence in sentences:\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n if pos == 'NN':\n database.nouns.append(word.lower())\n sentencesTrial.append('NN')\n elif pos == 'NNS':\n database.nounsplural.append(word.lower())\n sentencesTrial.append('NNS')\n elif pos == 'NNP':\n database.propernounS.append(word.lower())\n sentencesTrial.append('NNP')\n elif pos == 'NNPS':\n database.propernounP.append(word.lower())\n sentencesTrial.append('NNPS')\n elif pos == 'JJ':\n database.adjective.append(word.lower())\n sentencesTrial.append('JJ')\n elif pos == 'VB' or pos == 'VBG' or pos == 'VBN':\n database.verbs.append(word.lower())\n sentencesTrial.append('VB')\n elif pos == 'VBD':\n database.verbpast.append(word.lower())\n sentencesTrial.append('VBD')\n elif pos == 'VBZ' or pos == 'VBP':\n database.verb3person.append(word.lower())\n sentencesTrial.append('VBZ')\n elif pos == 'RB' or pos == 'RBR' or pos == 'RBS':\n database.adverb.append(word)\n sentencesTrial.append('RB'.lower())\n elif word == ',':\n database.useless.append(word)\n sentencesTrial.append(',')\n break\n elif word == '.':\n database.useless.append(word)\n sentencesTrial.append('.')\n break\n else:\n database.unUsedWords.append(word.lower())\n break\n nounCount = []\n trueNouns = []\n for x in database.nouns:\n if x in trueNouns:\n a = trueNouns.index(x)\n nounCount[a] += 1\n else:\n trueNouns.append(x)\n a = trueNouns.index(x)\n nounCount.append(1)\n for x in trueNouns:\n i = trueNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NN', locations[loc], nounCount[i]))\n 
nounpCount = []\n trueNounsp = []\n for x in database.nounsplural:\n if x in trueNounsp:\n a = trueNounsp.index(x)\n nounpCount[a] += 1\n else:\n trueNounsp.append(x)\n a = trueNounsp.index(x)\n nounpCount.append(1)\n for x in trueNounsp:\n i = trueNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNS', locations[loc], nounpCount[i]))\n pnounCount = []\n truepNouns = []\n for x in database.propernounS:\n if x in truepNouns:\n a = truepNouns.index(x)\n pnounCount[a] += 1\n else:\n truepNouns.append(x)\n a = truepNouns.index(x)\n pnounCount.append(1)\n for x in truepNouns:\n i = truepNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNP', locations[loc], pnounCount[i]))\n pnounpCount = []\n truepNounsp = []\n for x in database.propernounP:\n if x in truepNounsp:\n a = truepNounsp.index(x)\n pnounpCount[a] += 1\n else:\n truepNounsp.append(x)\n a = truepNounsp.index(x)\n pnounpCount.append(1)\n for x in truepNounsp:\n i = truepNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNPS', locations[loc], pnounpCount[i]))\n adjectCount = []\n trueadject = []\n for x in database.adjective:\n if x in trueadject:\n a = trueadject.index(x)\n adjectCount[a] += 1\n else:\n trueadject.append(x)\n a = trueadject.index(x)\n adjectCount.append(1)\n for x in trueadject:\n i = trueadject.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'JJ', locations[loc], adjectCount[i]))\n verbCount = []\n trueVerb = []\n for x in database.verbs:\n if x in trueVerb:\n a = trueVerb.index(x)\n verbCount[a] += 1\n else:\n trueVerb.append(x)\n a = trueVerb.index(x)\n verbCount.append(1)\n for x in trueVerb:\n i = trueVerb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VB', locations[loc], verbCount[i]))\n verbpCount = []\n trueVerbp = []\n for x in database.verbpast:\n if x in trueVerbp:\n a = trueVerbp.index(x)\n 
verbpCount[a] += 1\n else:\n trueVerbp.append(x)\n a = trueVerbp.index(x)\n verbpCount.append(1)\n for x in trueVerbp:\n i = trueVerbp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBD', locations[loc], verbpCount[i]))\n verb3pCount = []\n trueVerb3p = []\n for x in database.verb3person:\n if x in trueVerb3p:\n a = trueVerb3p.index(x)\n verb3pCount[a] += 1\n else:\n trueVerb3p.append(x)\n a = trueVerb3p.index(x)\n verb3pCount.append(1)\n for x in trueVerb3p:\n i = trueVerb3p.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBZ', locations[loc], verb3pCount[i]))\n adverbCount = []\n trueAdverb = []\n for x in database.adverb:\n if x in trueAdverb:\n a = trueAdverb.index(x)\n adverbCount[a] += 1\n else:\n trueAdverb.append(x)\n a = trueAdverb.index(x)\n adverbCount.append(1)\n for x in trueAdverb:\n i = trueAdverb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'RB', locations[loc], adverbCount[i]))\n uselessCount = []\n trueUseless = []\n for x in database.useless:\n if x in trueUseless:\n a = trueUseless.index(x)\n uselessCount[a] += 1\n else:\n trueUseless.append(x)\n a = trueUseless.index(x)\n uselessCount.append(1)\n for x in trueUseless:\n i = trueUseless.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'PU', locations[loc], uselessCount[i]))\n uuWCount = []\n trueuuW = []\n for x in database.unUsedWords:\n if x in trueuuW:\n a = trueuuW.index(x)\n uuWCount[a] += 1\n else:\n trueuuW.append(x)\n a = trueuuW.index(x)\n uuWCount.append(1)\n for x in trueuuW:\n i = trueuuW.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'US', locations[loc], uuWCount[i]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef pop(i):\n loc = i\n sentencesTrial = []\n File = open('words.txt')\n lines = File.read()\n sentences = nltk.sent_tokenize(lines)\n locations = ['Castle', 'Beach', 'Beach', 'Ghost Town', 'Ghost Town',\n 'Haunted House', 'Jungle', 'Carnival', 'Ghost Town', 'Highway',\n 'Castle', 'Pyramid', 'Beach', 'Beach', 'Carnival', 'Highway',\n 'Castle', 'Jungle']\n for sentence in sentences:\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n if pos == 'NN':\n database.nouns.append(word.lower())\n sentencesTrial.append('NN')\n elif pos == 'NNS':\n database.nounsplural.append(word.lower())\n sentencesTrial.append('NNS')\n elif pos == 'NNP':\n database.propernounS.append(word.lower())\n sentencesTrial.append('NNP')\n elif pos == 'NNPS':\n database.propernounP.append(word.lower())\n sentencesTrial.append('NNPS')\n elif pos == 'JJ':\n database.adjective.append(word.lower())\n sentencesTrial.append('JJ')\n elif pos == 'VB' or pos == 'VBG' or pos == 'VBN':\n database.verbs.append(word.lower())\n sentencesTrial.append('VB')\n elif pos == 'VBD':\n database.verbpast.append(word.lower())\n sentencesTrial.append('VBD')\n elif pos == 'VBZ' or pos == 'VBP':\n database.verb3person.append(word.lower())\n sentencesTrial.append('VBZ')\n elif pos == 'RB' or pos == 'RBR' or pos == 'RBS':\n database.adverb.append(word)\n sentencesTrial.append('RB'.lower())\n elif word == ',':\n database.useless.append(word)\n sentencesTrial.append(',')\n break\n elif word == '.':\n database.useless.append(word)\n sentencesTrial.append('.')\n break\n else:\n database.unUsedWords.append(word.lower())\n break\n nounCount = []\n trueNouns = []\n for x in database.nouns:\n if x in trueNouns:\n a = trueNouns.index(x)\n nounCount[a] += 1\n else:\n trueNouns.append(x)\n a = trueNouns.index(x)\n nounCount.append(1)\n for x in trueNouns:\n i = trueNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NN', locations[loc], nounCount[i]))\n 
nounpCount = []\n trueNounsp = []\n for x in database.nounsplural:\n if x in trueNounsp:\n a = trueNounsp.index(x)\n nounpCount[a] += 1\n else:\n trueNounsp.append(x)\n a = trueNounsp.index(x)\n nounpCount.append(1)\n for x in trueNounsp:\n i = trueNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNS', locations[loc], nounpCount[i]))\n pnounCount = []\n truepNouns = []\n for x in database.propernounS:\n if x in truepNouns:\n a = truepNouns.index(x)\n pnounCount[a] += 1\n else:\n truepNouns.append(x)\n a = truepNouns.index(x)\n pnounCount.append(1)\n for x in truepNouns:\n i = truepNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNP', locations[loc], pnounCount[i]))\n pnounpCount = []\n truepNounsp = []\n for x in database.propernounP:\n if x in truepNounsp:\n a = truepNounsp.index(x)\n pnounpCount[a] += 1\n else:\n truepNounsp.append(x)\n a = truepNounsp.index(x)\n pnounpCount.append(1)\n for x in truepNounsp:\n i = truepNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNPS', locations[loc], pnounpCount[i]))\n adjectCount = []\n trueadject = []\n for x in database.adjective:\n if x in trueadject:\n a = trueadject.index(x)\n adjectCount[a] += 1\n else:\n trueadject.append(x)\n a = trueadject.index(x)\n adjectCount.append(1)\n for x in trueadject:\n i = trueadject.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'JJ', locations[loc], adjectCount[i]))\n verbCount = []\n trueVerb = []\n for x in database.verbs:\n if x in trueVerb:\n a = trueVerb.index(x)\n verbCount[a] += 1\n else:\n trueVerb.append(x)\n a = trueVerb.index(x)\n verbCount.append(1)\n for x in trueVerb:\n i = trueVerb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VB', locations[loc], verbCount[i]))\n verbpCount = []\n trueVerbp = []\n for x in database.verbpast:\n if x in trueVerbp:\n a = trueVerbp.index(x)\n 
verbpCount[a] += 1\n else:\n trueVerbp.append(x)\n a = trueVerbp.index(x)\n verbpCount.append(1)\n for x in trueVerbp:\n i = trueVerbp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBD', locations[loc], verbpCount[i]))\n verb3pCount = []\n trueVerb3p = []\n for x in database.verb3person:\n if x in trueVerb3p:\n a = trueVerb3p.index(x)\n verb3pCount[a] += 1\n else:\n trueVerb3p.append(x)\n a = trueVerb3p.index(x)\n verb3pCount.append(1)\n for x in trueVerb3p:\n i = trueVerb3p.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBZ', locations[loc], verb3pCount[i]))\n adverbCount = []\n trueAdverb = []\n for x in database.adverb:\n if x in trueAdverb:\n a = trueAdverb.index(x)\n adverbCount[a] += 1\n else:\n trueAdverb.append(x)\n a = trueAdverb.index(x)\n adverbCount.append(1)\n for x in trueAdverb:\n i = trueAdverb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'RB', locations[loc], adverbCount[i]))\n uselessCount = []\n trueUseless = []\n for x in database.useless:\n if x in trueUseless:\n a = trueUseless.index(x)\n uselessCount[a] += 1\n else:\n trueUseless.append(x)\n a = trueUseless.index(x)\n uselessCount.append(1)\n for x in trueUseless:\n i = trueUseless.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'PU', locations[loc], uselessCount[i]))\n uuWCount = []\n trueuuW = []\n for x in database.unUsedWords:\n if x in trueuuW:\n a = trueuuW.index(x)\n uuWCount[a] += 1\n else:\n trueuuW.append(x)\n a = trueuuW.index(x)\n uuWCount.append(1)\n for x in trueuuW:\n i = trueuuW.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'US', locations[loc], uuWCount[i]))\n\n\ndef pop2():\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Knight','Castle','Old Man Jenkins','Picture')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Vampire' , 'Castle' , 'Andrew the Tour', 'Vampire Make 
Up and fake blood')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Shadow' , 'Castle' , 'Frank the Janitor' , 'Black paint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost Pirate','Beach','Bill the Lifeguard','Pirate Costume')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Seaweed Monster','Beach','Old Fisherman Joe','Seaweed')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Shark','Beach','The Mayor','Shark fins')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Cowboy Ghost','Ghost Town','Jerry the Businessman ','Cowboy hat')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Miner Ghost','Ghost Town','Gold Hunter Phil','Dusty shoes')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Headless Horse Man','Ghost Town','Envirnmentalist Paddy','Drawing of rig to appear headless')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Francinstein','Haunted House','Sir Godfree','Green paint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Zombie','Haunted House','The Waiter','Zombie Make Up and fake boy parts')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost','Haunted House','Jimmy','Glow in the dark paint on cloths')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ape Man','Jungle','Explorer Fred','Ape Costume')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Animal Ghosts','Jungle','Environmentalist Jennie','Scratch Marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Pterodactyl','Jungle','Tour Guide Bill','Book on flight')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Clown Ghost','Carnival','Ring Master','Old Clown Costumes')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Zombie','Carnival','Blind Knife Thrower','Eye tests saying he is not blind')\"\n )\n 
database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Animals','Carnival','Worlds Strongest Man','Scratch marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost Car','Highway','Old Town Mayor','Car ownership documents')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('White Lady Ghost','Highway','Miss Anderson','White Dress')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Aliens','Highway','Conspiracy Tom','Fake Space ship blueprint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Mummy','Pyramid','Museum Curator Petterson ','Bandages')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Sand Man','Pyramid','Ramesh the Tour Guide','Sand')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Sphynx','Pyramid','Tour Guide Bob','scratch marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Scooby Doo','Scooby Dooby Doo')\")\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Shaggy','Zoinks!')\")\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Fred','Lets Split up and look for clues')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Velma','My glasses. 
I cant find my glasses')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Daphne','Do you want a Scooby Snack')\"\n )\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Stormy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Misty')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Ghost Town','Cloudy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Ghost TOwn','Foggy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Haunted House','Stormy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Haunted House','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Cloudy')\"\n )\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Carnival','Overcast')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Highway','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Highway','Sunny')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Pyramid','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Raining')\"\n )\n",
"step-4": "import database\nimport nltk\n\n\ndef pop(i):\n loc = i\n sentencesTrial = []\n File = open('words.txt')\n lines = File.read()\n sentences = nltk.sent_tokenize(lines)\n locations = ['Castle', 'Beach', 'Beach', 'Ghost Town', 'Ghost Town',\n 'Haunted House', 'Jungle', 'Carnival', 'Ghost Town', 'Highway',\n 'Castle', 'Pyramid', 'Beach', 'Beach', 'Carnival', 'Highway',\n 'Castle', 'Jungle']\n for sentence in sentences:\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n if pos == 'NN':\n database.nouns.append(word.lower())\n sentencesTrial.append('NN')\n elif pos == 'NNS':\n database.nounsplural.append(word.lower())\n sentencesTrial.append('NNS')\n elif pos == 'NNP':\n database.propernounS.append(word.lower())\n sentencesTrial.append('NNP')\n elif pos == 'NNPS':\n database.propernounP.append(word.lower())\n sentencesTrial.append('NNPS')\n elif pos == 'JJ':\n database.adjective.append(word.lower())\n sentencesTrial.append('JJ')\n elif pos == 'VB' or pos == 'VBG' or pos == 'VBN':\n database.verbs.append(word.lower())\n sentencesTrial.append('VB')\n elif pos == 'VBD':\n database.verbpast.append(word.lower())\n sentencesTrial.append('VBD')\n elif pos == 'VBZ' or pos == 'VBP':\n database.verb3person.append(word.lower())\n sentencesTrial.append('VBZ')\n elif pos == 'RB' or pos == 'RBR' or pos == 'RBS':\n database.adverb.append(word)\n sentencesTrial.append('RB'.lower())\n elif word == ',':\n database.useless.append(word)\n sentencesTrial.append(',')\n break\n elif word == '.':\n database.useless.append(word)\n sentencesTrial.append('.')\n break\n else:\n database.unUsedWords.append(word.lower())\n break\n nounCount = []\n trueNouns = []\n for x in database.nouns:\n if x in trueNouns:\n a = trueNouns.index(x)\n nounCount[a] += 1\n else:\n trueNouns.append(x)\n a = trueNouns.index(x)\n nounCount.append(1)\n for x in trueNouns:\n i = trueNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NN', locations[loc], 
nounCount[i]))\n nounpCount = []\n trueNounsp = []\n for x in database.nounsplural:\n if x in trueNounsp:\n a = trueNounsp.index(x)\n nounpCount[a] += 1\n else:\n trueNounsp.append(x)\n a = trueNounsp.index(x)\n nounpCount.append(1)\n for x in trueNounsp:\n i = trueNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNS', locations[loc], nounpCount[i]))\n pnounCount = []\n truepNouns = []\n for x in database.propernounS:\n if x in truepNouns:\n a = truepNouns.index(x)\n pnounCount[a] += 1\n else:\n truepNouns.append(x)\n a = truepNouns.index(x)\n pnounCount.append(1)\n for x in truepNouns:\n i = truepNouns.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNP', locations[loc], pnounCount[i]))\n pnounpCount = []\n truepNounsp = []\n for x in database.propernounP:\n if x in truepNounsp:\n a = truepNounsp.index(x)\n pnounpCount[a] += 1\n else:\n truepNounsp.append(x)\n a = truepNounsp.index(x)\n pnounpCount.append(1)\n for x in truepNounsp:\n i = truepNounsp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'NNPS', locations[loc], pnounpCount[i]))\n adjectCount = []\n trueadject = []\n for x in database.adjective:\n if x in trueadject:\n a = trueadject.index(x)\n adjectCount[a] += 1\n else:\n trueadject.append(x)\n a = trueadject.index(x)\n adjectCount.append(1)\n for x in trueadject:\n i = trueadject.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'JJ', locations[loc], adjectCount[i]))\n verbCount = []\n trueVerb = []\n for x in database.verbs:\n if x in trueVerb:\n a = trueVerb.index(x)\n verbCount[a] += 1\n else:\n trueVerb.append(x)\n a = trueVerb.index(x)\n verbCount.append(1)\n for x in trueVerb:\n i = trueVerb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VB', locations[loc], verbCount[i]))\n verbpCount = []\n trueVerbp = []\n for x in database.verbpast:\n if x in trueVerbp:\n a = 
trueVerbp.index(x)\n verbpCount[a] += 1\n else:\n trueVerbp.append(x)\n a = trueVerbp.index(x)\n verbpCount.append(1)\n for x in trueVerbp:\n i = trueVerbp.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBD', locations[loc], verbpCount[i]))\n verb3pCount = []\n trueVerb3p = []\n for x in database.verb3person:\n if x in trueVerb3p:\n a = trueVerb3p.index(x)\n verb3pCount[a] += 1\n else:\n trueVerb3p.append(x)\n a = trueVerb3p.index(x)\n verb3pCount.append(1)\n for x in trueVerb3p:\n i = trueVerb3p.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'VBZ', locations[loc], verb3pCount[i]))\n adverbCount = []\n trueAdverb = []\n for x in database.adverb:\n if x in trueAdverb:\n a = trueAdverb.index(x)\n adverbCount[a] += 1\n else:\n trueAdverb.append(x)\n a = trueAdverb.index(x)\n adverbCount.append(1)\n for x in trueAdverb:\n i = trueAdverb.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'RB', locations[loc], adverbCount[i]))\n uselessCount = []\n trueUseless = []\n for x in database.useless:\n if x in trueUseless:\n a = trueUseless.index(x)\n uselessCount[a] += 1\n else:\n trueUseless.append(x)\n a = trueUseless.index(x)\n uselessCount.append(1)\n for x in trueUseless:\n i = trueUseless.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'PU', locations[loc], uselessCount[i]))\n uuWCount = []\n trueuuW = []\n for x in database.unUsedWords:\n if x in trueuuW:\n a = trueuuW.index(x)\n uuWCount[a] += 1\n else:\n trueuuW.append(x)\n a = trueuuW.index(x)\n uuWCount.append(1)\n for x in trueuuW:\n i = trueuuW.index(x)\n database.cursor.execute('INSERT INTO words VALUES (?, ?, ?, ?)', (x,\n 'US', locations[loc], uuWCount[i]))\n\n\ndef pop2():\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Knight','Castle','Old Man Jenkins','Picture')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Vampire' , 'Castle' , 'Andrew the 
Tour', 'Vampire Make Up and fake blood')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Shadow' , 'Castle' , 'Frank the Janitor' , 'Black paint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost Pirate','Beach','Bill the Lifeguard','Pirate Costume')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Seaweed Monster','Beach','Old Fisherman Joe','Seaweed')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Shark','Beach','The Mayor','Shark fins')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Cowboy Ghost','Ghost Town','Jerry the Businessman ','Cowboy hat')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Miner Ghost','Ghost Town','Gold Hunter Phil','Dusty shoes')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Headless Horse Man','Ghost Town','Envirnmentalist Paddy','Drawing of rig to appear headless')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Francinstein','Haunted House','Sir Godfree','Green paint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Zombie','Haunted House','The Waiter','Zombie Make Up and fake boy parts')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost','Haunted House','Jimmy','Glow in the dark paint on cloths')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ape Man','Jungle','Explorer Fred','Ape Costume')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Animal Ghosts','Jungle','Environmentalist Jennie','Scratch Marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Pterodactyl','Jungle','Tour Guide Bill','Book on flight')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Clown Ghost','Carnival','Ring Master','Old Clown Costumes')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Zombie','Carnival','Blind Knife Thrower','Eye tests saying he is not blind')\"\n 
)\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Animals','Carnival','Worlds Strongest Man','Scratch marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Ghost Car','Highway','Old Town Mayor','Car ownership documents')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('White Lady Ghost','Highway','Miss Anderson','White Dress')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Aliens','Highway','Conspiracy Tom','Fake Space ship blueprint')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Mummy','Pyramid','Museum Curator Petterson ','Bandages')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Sand Man','Pyramid','Ramesh the Tour Guide','Sand')\"\n )\n database.cursor.execute(\n \"INSERT INTO monsters VALUES ('Sphynx','Pyramid','Tour Guide Bob','scratch marks')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Scooby Doo','Scooby Dooby Doo')\")\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Shaggy','Zoinks!')\")\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Fred','Lets Split up and look for clues')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Velma','My glasses. 
I cant find my glasses')\"\n )\n database.cursor.execute(\n \"INSERT INTO characters VALUES ('Daphne','Do you want a Scooby Snack')\"\n )\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Stormy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Misty')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Ghost Town','Cloudy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Ghost TOwn','Foggy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Haunted House','Stormy')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Haunted House','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Cloudy')\"\n )\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Carnival','Overcast')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Highway','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Highway','Sunny')\")\n database.cursor.execute(\n \"INSERT INTO location VALUES ('Pyramid','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Raining')\"\n )\n",
"step-5": "import database\nimport nltk\ndef pop(i): # pupulate the words table\n loc = i\n sentencesTrial = []\n File = open('words.txt')\n lines = File.read()\n sentences = nltk.sent_tokenize(lines)\n locations = [\"Castle\",\"Beach\",\"Beach\",\"Ghost Town\",\"Ghost Town\",\"Haunted House\",\"Jungle\",\"Carnival\", \"Ghost Town\", \"Highway\", \"Castle\", \"Pyramid\",\"Beach\",\"Beach\",\"Carnival\", \"Highway\", \"Castle\" ,\"Jungle\" ]\n\n for sentence in sentences:\n for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):\n if(pos == 'NN'):\n database.nouns.append(word.lower())\n sentencesTrial.append(\"NN\")\n elif (pos == 'NNS'):\n database.nounsplural.append(word.lower())\n sentencesTrial.append(\"NNS\")\n elif (pos == 'NNP'):\n database.propernounS.append(word.lower())\n sentencesTrial.append(\"NNP\")\n elif (pos == 'NNPS'):\n database.propernounP.append(word.lower())\n sentencesTrial.append(\"NNPS\")\n elif (pos == 'JJ'):\n database.adjective.append(word.lower())\n sentencesTrial.append(\"JJ\")\n elif (pos == 'VB' or pos == 'VBG' or pos == 'VBN'):\n database.verbs.append(word.lower())\n sentencesTrial.append(\"VB\")\n elif (pos == 'VBD'):\n database.verbpast.append(word.lower())\n sentencesTrial.append(\"VBD\")\n elif (pos == 'VBZ' or pos == 'VBP'):\n database.verb3person.append(word.lower())\n sentencesTrial.append(\"VBZ\")\n elif (pos == 'RB' or pos == 'RBR' or pos == 'RBS'):\n database.adverb.append(word)\n sentencesTrial.append(\"RB\".lower())\n else:\n if(word == \",\"):\n database.useless.append(word)\n sentencesTrial.append(\",\")\n break\n elif(word == \".\"):\n database.useless.append(word)\n sentencesTrial.append(\".\")\n break\n else:\n database.unUsedWords.append(word.lower())\n break\n\n nounCount = []\n trueNouns = []\n\n for x in database.nouns:\n if x in trueNouns:\n a = trueNouns.index(x)\n nounCount[a] +=1\n else:\n trueNouns.append(x)\n a = trueNouns.index(x)\n nounCount.append(1)\n\n for x in trueNouns:\n i = 
trueNouns.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x,'NN',locations[loc],nounCount[i]))\n\n nounpCount = []\n trueNounsp = []\n\n for x in database.nounsplural:\n if x in trueNounsp:\n a = trueNounsp.index(x)\n nounpCount[a] += 1\n else:\n trueNounsp.append(x)\n a = trueNounsp.index(x)\n nounpCount.append(1)\n\n for x in trueNounsp:\n i = trueNounsp.index(x)\n database.cursor.execute(\n \"INSERT INTO words VALUES (?, ?, ?, ?)\",\n (x, 'NNS', locations[loc], nounpCount[i]))\n\n pnounCount = []\n truepNouns = []\n\n for x in database.propernounS:\n if x in truepNouns:\n a = truepNouns.index(x)\n pnounCount[a] += 1\n else:\n truepNouns.append(x)\n a = truepNouns.index(x)\n pnounCount.append(1)\n\n for x in truepNouns:\n i = truepNouns.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'NNP', locations[loc], pnounCount[i]))\n\n pnounpCount = []\n truepNounsp = []\n\n for x in database.propernounP:\n if x in truepNounsp:\n a = truepNounsp.index(x)\n pnounpCount[a] += 1\n else:\n truepNounsp.append(x)\n a = truepNounsp.index(x)\n pnounpCount.append(1)\n\n for x in truepNounsp:\n i = truepNounsp.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'NNPS', locations[loc], pnounpCount[i]))\n\n adjectCount = []\n trueadject = []\n\n for x in database.adjective:\n if x in trueadject:\n a = trueadject.index(x)\n adjectCount[a] += 1\n else:\n trueadject.append(x)\n a = trueadject.index(x)\n adjectCount.append(1)\n\n for x in trueadject:\n i = trueadject.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'JJ', locations[loc], adjectCount[i]))\n\n verbCount = []\n trueVerb = []\n\n for x in database.verbs:\n if x in trueVerb:\n a = trueVerb.index(x)\n verbCount[a] += 1\n else:\n trueVerb.append(x)\n a = trueVerb.index(x)\n verbCount.append(1)\n\n for x in trueVerb:\n i = trueVerb.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, 
?)\", (x, 'VB', locations[loc], verbCount[i]))\n\n verbpCount = []\n trueVerbp = []\n\n for x in database.verbpast:\n if x in trueVerbp:\n a = trueVerbp.index(x)\n verbpCount[a] += 1\n else:\n trueVerbp.append(x)\n a = trueVerbp.index(x)\n verbpCount.append(1)\n\n for x in trueVerbp:\n i = trueVerbp.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'VBD', locations[loc], verbpCount[i]))\n\n verb3pCount = []\n trueVerb3p = []\n\n for x in database.verb3person:\n if x in trueVerb3p:\n a = trueVerb3p.index(x)\n verb3pCount[a] += 1\n else:\n trueVerb3p.append(x)\n a = trueVerb3p.index(x)\n verb3pCount.append(1)\n\n for x in trueVerb3p:\n i = trueVerb3p.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'VBZ', locations[loc], verb3pCount[i]))\n\n adverbCount = []\n trueAdverb = []\n\n for x in database.adverb:\n if x in trueAdverb:\n a = trueAdverb.index(x)\n adverbCount[a] += 1\n else:\n trueAdverb.append(x)\n a = trueAdverb.index(x)\n adverbCount.append(1)\n\n for x in trueAdverb:\n i = trueAdverb.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'RB', locations[loc], adverbCount[i]))\n\n uselessCount = []\n trueUseless = []\n\n for x in database.useless:\n if x in trueUseless:\n a = trueUseless.index(x)\n uselessCount[a] += 1\n else:\n trueUseless.append(x)\n a = trueUseless.index(x)\n uselessCount.append(1)\n\n for x in trueUseless:\n i = trueUseless.index(x)\n database.cursor.execute(\n \"INSERT INTO words VALUES (?, ?, ?, ?)\",\n (x, 'PU', locations[loc], uselessCount[i]))\n\n uuWCount = []\n trueuuW = []\n\n for x in database.unUsedWords:\n if x in trueuuW:\n a = trueuuW.index(x)\n uuWCount[a] += 1\n else:\n trueuuW.append(x)\n a = trueuuW.index(x)\n uuWCount.append(1)\n\n for x in trueuuW:\n i = trueuuW.index(x)\n database.cursor.execute(\"INSERT INTO words VALUES (?, ?, ?, ?)\", (x, 'US', locations[loc], uuWCount[i]))\n\n\ndef pop2(): #populate the monster and 
characters table\n\n####populating the monsters\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Knight','Castle','Old Man Jenkins','Picture')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Vampire' , 'Castle' , 'Andrew the Tour', 'Vampire Make Up and fake blood')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Shadow' , 'Castle' , 'Frank the Janitor' , 'Black paint')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Ghost Pirate','Beach','Bill the Lifeguard','Pirate Costume')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Seaweed Monster','Beach','Old Fisherman Joe','Seaweed')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Shark','Beach','The Mayor','Shark fins')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Cowboy Ghost','Ghost Town','Jerry the Businessman ','Cowboy hat')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Miner Ghost','Ghost Town','Gold Hunter Phil','Dusty shoes')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Headless Horse Man','Ghost Town','Envirnmentalist Paddy','Drawing of rig to appear headless')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Francinstein','Haunted House','Sir Godfree','Green paint')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Zombie','Haunted House','The Waiter','Zombie Make Up and fake boy parts')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Ghost','Haunted House','Jimmy','Glow in the dark paint on cloths')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Ape Man','Jungle','Explorer Fred','Ape Costume')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Animal Ghosts','Jungle','Environmentalist Jennie','Scratch Marks')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Pterodactyl','Jungle','Tour Guide Bill','Book on flight')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Clown Ghost','Carnival','Ring 
Master','Old Clown Costumes')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Zombie','Carnival','Blind Knife Thrower','Eye tests saying he is not blind')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Animals','Carnival','Worlds Strongest Man','Scratch marks')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Ghost Car','Highway','Old Town Mayor','Car ownership documents')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('White Lady Ghost','Highway','Miss Anderson','White Dress')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Aliens','Highway','Conspiracy Tom','Fake Space ship blueprint')\")\n\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Mummy','Pyramid','Museum Curator Petterson ','Bandages')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Sand Man','Pyramid','Ramesh the Tour Guide','Sand')\")\n database.cursor.execute(\"INSERT INTO monsters VALUES ('Sphynx','Pyramid','Tour Guide Bob','scratch marks')\")\n\n####populating the characters\n\n\n database.cursor.execute(\"INSERT INTO characters VALUES ('Scooby Doo','Scooby Dooby Doo')\")\n database.cursor.execute(\"INSERT INTO characters VALUES ('Shaggy','Zoinks!')\")\n database.cursor.execute(\"INSERT INTO characters VALUES ('Fred','Lets Split up and look for clues')\")\n database.cursor.execute(\"INSERT INTO characters VALUES ('Velma','My glasses. 
I cant find my glasses')\")\n database.cursor.execute(\"INSERT INTO characters VALUES ('Daphne','Do you want a Scooby Snack')\")\n\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Stormy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Castle','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Beach','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Ghost Town','Cloudy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Ghost TOwn','Foggy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Haunted House','Stormy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Haunted House','Misty')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Jungle','Raining')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Dark')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Cloudy')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Carnival','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Highway','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Highway','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Overcast')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Sunny')\")\n database.cursor.execute(\"INSERT INTO location VALUES ('Pyramid','Raining')\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ImageClassifierMockup(ImageClassifier):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ImageClassifierMockup(ImageClassifier):
<|reserved_special_token_0|>
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
<|reserved_special_token_1|>
from allcode.controllers.image_classifiers.image_classifier import ImageClassifier
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog', 'final_prob': 0.8}
<|reserved_special_token_1|>
from allcode.controllers.image_classifiers.image_classifier import ImageClassifier
class ImageClassifierMockup(ImageClassifier):
def classify_images(self, images):
pass
def classify_image(self, image):
return {'final_class': 'dog',
'final_prob': .8}
|
flexible
|
{
"blob_id": "71fb9dc9f9ac8b1cdbc6af8a859dbc211512b4d1",
"index": 1675,
"step-1": "<mask token>\n\n\nclass ImageClassifierMockup(ImageClassifier):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageClassifierMockup(ImageClassifier):\n <mask token>\n\n def classify_image(self, image):\n return {'final_class': 'dog', 'final_prob': 0.8}\n",
"step-3": "<mask token>\n\n\nclass ImageClassifierMockup(ImageClassifier):\n\n def classify_images(self, images):\n pass\n\n def classify_image(self, image):\n return {'final_class': 'dog', 'final_prob': 0.8}\n",
"step-4": "from allcode.controllers.image_classifiers.image_classifier import ImageClassifier\n\n\nclass ImageClassifierMockup(ImageClassifier):\n\n def classify_images(self, images):\n pass\n\n def classify_image(self, image):\n return {'final_class': 'dog', 'final_prob': 0.8}\n",
"step-5": "from allcode.controllers.image_classifiers.image_classifier import ImageClassifier\n\n\nclass ImageClassifierMockup(ImageClassifier):\n\n def classify_images(self, images):\n pass\n\n def classify_image(self, image):\n return {'final_class': 'dog',\n 'final_prob': .8}\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import mysql.connector
# config = {
# "user":"root",
# "password":"Sm13481353",
# "host":"3"
# }
mydb = mysql.connector.connect(
user="seyed",
password="Sm13481353",
host="localhost",
database="telegram_bot",
auth_plugin="mysql_native_password"
)
mycursor = mydb.cursor()
query = "insert into question(update_id,chat_id) values (40,20)"
# mycursor.execute(query)
# mydb.commit()
mycursor.execute("select * from question")
users = mycursor.fetchall()
for user in users:
print(user)
|
normal
|
{
"blob_id": "a29a904290cb733ac7b526a75e0c218b952e2266",
"index": 4630,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmycursor.execute('select * from question')\n<mask token>\nfor user in users:\n print(user)\n",
"step-3": "<mask token>\nmydb = mysql.connector.connect(user='seyed', password='Sm13481353', host=\n 'localhost', database='telegram_bot', auth_plugin='mysql_native_password')\nmycursor = mydb.cursor()\nquery = 'insert into question(update_id,chat_id) values (40,20)'\nmycursor.execute('select * from question')\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n",
"step-4": "import mysql.connector\nmydb = mysql.connector.connect(user='seyed', password='Sm13481353', host=\n 'localhost', database='telegram_bot', auth_plugin='mysql_native_password')\nmycursor = mydb.cursor()\nquery = 'insert into question(update_id,chat_id) values (40,20)'\nmycursor.execute('select * from question')\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n",
"step-5": "import mysql.connector\n# config = {\n# \"user\":\"root\",\n# \"password\":\"Sm13481353\",\n# \"host\":\"3\"\n# }\nmydb = mysql.connector.connect(\n user=\"seyed\",\n password=\"Sm13481353\",\n host=\"localhost\",\n database=\"telegram_bot\",\n auth_plugin=\"mysql_native_password\"\n )\nmycursor = mydb.cursor()\nquery = \"insert into question(update_id,chat_id) values (40,20)\"\n# mycursor.execute(query)\n# mydb.commit()\nmycursor.execute(\"select * from question\")\nusers = mycursor.fetchall()\nfor user in users:\n print(user)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('play', '0001_initial')]
operations = [migrations.CreateModel(name='playerA', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('playerA', models.CharField(max_length
=15)), ('join_id', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':
'PlayerA'}), migrations.CreateModel(name='playerB', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('playerB', models.CharField(max_length
=15)), ('join_id', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':
'PlayerB'}), migrations.DeleteModel(name='Player')]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('play', '0001_initial')]
operations = [migrations.CreateModel(name='playerA', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('playerA', models.CharField(max_length
=15)), ('join_id', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':
'PlayerA'}), migrations.CreateModel(name='playerB', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('playerB', models.CharField(max_length
=15)), ('join_id', models.ForeignKey(on_delete=django.db.models.
deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':
'PlayerB'}), migrations.DeleteModel(name='Player')]
<|reserved_special_token_1|>
# Generated by Django 3.0.7 on 2020-12-16 15:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('play', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='playerA',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('playerA', models.CharField(max_length=15)),
('join_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='play.Room')),
],
options={
'verbose_name_plural': 'PlayerA',
},
),
migrations.CreateModel(
name='playerB',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('playerB', models.CharField(max_length=15)),
('join_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='play.Room')),
],
options={
'verbose_name_plural': 'PlayerB',
},
),
migrations.DeleteModel(
name='Player',
),
]
|
flexible
|
{
"blob_id": "ea414835554ea3dcac2017036692cf178526f91b",
"index": 5641,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('play', '0001_initial')]\n operations = [migrations.CreateModel(name='playerA', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('playerA', models.CharField(max_length\n =15)), ('join_id', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':\n 'PlayerA'}), migrations.CreateModel(name='playerB', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('playerB', models.CharField(max_length\n =15)), ('join_id', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':\n 'PlayerB'}), migrations.DeleteModel(name='Player')]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('play', '0001_initial')]\n operations = [migrations.CreateModel(name='playerA', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('playerA', models.CharField(max_length\n =15)), ('join_id', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':\n 'PlayerA'}), migrations.CreateModel(name='playerB', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('playerB', models.CharField(max_length\n =15)), ('join_id', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='play.Room'))], options={'verbose_name_plural':\n 'PlayerB'}), migrations.DeleteModel(name='Player')]\n",
"step-5": "# Generated by Django 3.0.7 on 2020-12-16 15:29\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('play', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='playerA',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('playerA', models.CharField(max_length=15)),\r\n ('join_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='play.Room')),\r\n ],\r\n options={\r\n 'verbose_name_plural': 'PlayerA',\r\n },\r\n ),\r\n migrations.CreateModel(\r\n name='playerB',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('playerB', models.CharField(max_length=15)),\r\n ('join_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='play.Room')),\r\n ],\r\n options={\r\n 'verbose_name_plural': 'PlayerB',\r\n },\r\n ),\r\n migrations.DeleteModel(\r\n name='Player',\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def grayCode(self, n: int) ->List[int]:
res = [0] * 2 ** n
exp = 0
l = r = 1
for i in range(1, 2 ** n):
res[i] += res[r - i] + 2 ** exp
if i == r:
exp += 1
l = r + 1
r = l + 2 ** exp - 1
return res
<|reserved_special_token_1|>
from typing import List
class Solution:
def grayCode(self, n: int) ->List[int]:
res = [0] * 2 ** n
exp = 0
l = r = 1
for i in range(1, 2 ** n):
res[i] += res[r - i] + 2 ** exp
if i == r:
exp += 1
l = r + 1
r = l + 2 ** exp - 1
return res
|
flexible
|
{
"blob_id": "dc600763b12edda05820721098e7e5bc80f74c89",
"index": 4798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def grayCode(self, n: int) ->List[int]:\n res = [0] * 2 ** n\n exp = 0\n l = r = 1\n for i in range(1, 2 ** n):\n res[i] += res[r - i] + 2 ** exp\n if i == r:\n exp += 1\n l = r + 1\n r = l + 2 ** exp - 1\n return res\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def grayCode(self, n: int) ->List[int]:\n res = [0] * 2 ** n\n exp = 0\n l = r = 1\n for i in range(1, 2 ** n):\n res[i] += res[r - i] + 2 ** exp\n if i == r:\n exp += 1\n l = r + 1\n r = l + 2 ** exp - 1\n return res\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import matplotlib.pyplot as plt
w1 = [(1, 2, 7), (1, 8, 1),
(1, 7, 5), (1, 6, 3),
(1, 7, 8), (1, 5, 9),
(1, 4, 5)]
w2 = [(-1, -4, -2), (-1, 1, 1),
(-1, -1, -3), (-1, -3, 2),
(-1, -5, -3.25), (-1, -2, -4),
(-1, -7, -1)]
dataset = [(1, 2, 7), (1, 8, 1),
(1, 7, 5), (1, 6, 3),
(1, 7, 8), (1, 5, 9),
(1, 4, 5), (-1, -4, -2),
(-1, 1, 1), (-1, -1, -3),
(-1, -3, 2), (-1, -5, -3.25),
(-1, -2, -4), (-1, -7, -1)]
# Single Perceptron function
def single_sample_perceptron():
weight = [1, 1, 1]
iterations = 0
while(1):
iterations = iterations+1
ans = 0
count = 0
eta = 0.2
# print weight
for j in xrange(len(dataset)):
ans = 0
for i in xrange(3):
ans = ans+float(weight[i]*dataset[j][i])
if(ans < 0):
for i in xrange(3):
weight[i] = weight[i]+eta*dataset[j][i]
break
count += 1
if count == len(dataset):
break
print
print "Final weights: ",
print weight
print "No. of Iterations: ",
print iterations
return weight
def main():
a = single_sample_perceptron()
x1 = x2 = y1 = y2 = []
for j in range(len(w1)):
x1.append(w1[j][1])
y1.append(w1[j][2])
for j in range(len(w2)):
x2.append((-1)*w2[j][1])
y2.append((-1)*w2[j][2])
plt.plot(x1, y1, 'ro')
plt.plot(x2, y2, 'bo')
m1 = a[2]/a[1]
m2 = (-1)/(m1)
c = (-1)*a[0]/a[2]
ya = m2*100+c
yb = m2*(-100)+c
plt.plot([100, -100], [ya, yb], 'r')
plt.axis([-10, 10, -10, 10])
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "15105e22b3c1860735f282a2247ab41b138d75cf",
"index": 3452,
"step-1": "import matplotlib.pyplot as plt\n\nw1 = [(1, 2, 7), (1, 8, 1),\n (1, 7, 5), (1, 6, 3),\n (1, 7, 8), (1, 5, 9),\n (1, 4, 5)]\nw2 = [(-1, -4, -2), (-1, 1, 1),\n (-1, -1, -3), (-1, -3, 2),\n (-1, -5, -3.25), (-1, -2, -4),\n (-1, -7, -1)]\ndataset = [(1, 2, 7), (1, 8, 1),\n (1, 7, 5), (1, 6, 3),\n (1, 7, 8), (1, 5, 9),\n (1, 4, 5), (-1, -4, -2),\n (-1, 1, 1), (-1, -1, -3),\n (-1, -3, 2), (-1, -5, -3.25),\n (-1, -2, -4), (-1, -7, -1)]\n\n\n# Single Perceptron function\ndef single_sample_perceptron():\n weight = [1, 1, 1]\n iterations = 0\n while(1):\n iterations = iterations+1\n ans = 0\n count = 0\n eta = 0.2\n # print weight\n for j in xrange(len(dataset)):\n ans = 0\n for i in xrange(3):\n ans = ans+float(weight[i]*dataset[j][i])\n if(ans < 0):\n for i in xrange(3):\n weight[i] = weight[i]+eta*dataset[j][i]\n break\n count += 1\n if count == len(dataset):\n break\n print\n print \"Final weights: \",\n print weight\n print \"No. of Iterations: \",\n print iterations\n\n return weight\n\n\ndef main():\n a = single_sample_perceptron()\n x1 = x2 = y1 = y2 = []\n for j in range(len(w1)):\n x1.append(w1[j][1])\n y1.append(w1[j][2])\n for j in range(len(w2)):\n x2.append((-1)*w2[j][1])\n y2.append((-1)*w2[j][2])\n\n plt.plot(x1, y1, 'ro')\n plt.plot(x2, y2, 'bo')\n m1 = a[2]/a[1]\n m2 = (-1)/(m1)\n c = (-1)*a[0]/a[2]\n ya = m2*100+c\n yb = m2*(-100)+c\n plt.plot([100, -100], [ya, yb], 'r')\n plt.axis([-10, 10, -10, 10])\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
layout = html.Div([html.Div([html.Div([html.H6('Répartition des biens'),
dcc.Graph(id='pieGraph', figure={'data': [{'values': [2878001, 2342181,
1773296, 521395], 'labels': ['Maison', 'Appartement', 'Dependance',
'local_indistriel'], 'name': 'Biens', 'hoverinfo': 'label+name+percent',
'hole': 0.7, 'type': 'pie', 'marker': {'colors': ['#3b7548', '#ea1313',
'#ffd700', '#FF00FF']}}], 'layout': {'width': '2000', 'annotations': [{
'font': {'size': 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y':
0.2}], 'showlegend': False}})], className='six columns'), html.Div([
html.H6('Effectif des biens'), dcc.Graph(id='3', figure={'data': [{'x':
['Maison', 'Appartement', 'Dependance', 'local_indistriel'], 'y': [
2878001, 2342181, 1773296, 521395], 'name': 'Bar biens', 'type': 'bar',
'marker': dict(color=['#3b7548', '#ea1313', '#ffd700', '#FF00FF'])}],
'layout': {'xaxis': dict(tickfont=dict(color='black')), 'yaxis': dict(
tickfont=dict(color='black')), 'width': '2000', 'yaxis': {'title':
'Nombre'}, 'xaxis': {'title': 'Type'}, 'annotations': [{'font': {'size':
20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': 0.2}], 'showlegend':
False}})], className='six columns')], className='row', style={'margin':
'1% 3%'})])
<|reserved_special_token_1|>
import dash_html_components as html
import dash_core_components as dcc
layout = html.Div([html.Div([html.Div([html.H6('Répartition des biens'),
dcc.Graph(id='pieGraph', figure={'data': [{'values': [2878001, 2342181,
1773296, 521395], 'labels': ['Maison', 'Appartement', 'Dependance',
'local_indistriel'], 'name': 'Biens', 'hoverinfo': 'label+name+percent',
'hole': 0.7, 'type': 'pie', 'marker': {'colors': ['#3b7548', '#ea1313',
'#ffd700', '#FF00FF']}}], 'layout': {'width': '2000', 'annotations': [{
'font': {'size': 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y':
0.2}], 'showlegend': False}})], className='six columns'), html.Div([
html.H6('Effectif des biens'), dcc.Graph(id='3', figure={'data': [{'x':
['Maison', 'Appartement', 'Dependance', 'local_indistriel'], 'y': [
2878001, 2342181, 1773296, 521395], 'name': 'Bar biens', 'type': 'bar',
'marker': dict(color=['#3b7548', '#ea1313', '#ffd700', '#FF00FF'])}],
'layout': {'xaxis': dict(tickfont=dict(color='black')), 'yaxis': dict(
tickfont=dict(color='black')), 'width': '2000', 'yaxis': {'title':
'Nombre'}, 'xaxis': {'title': 'Type'}, 'annotations': [{'font': {'size':
20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': 0.2}], 'showlegend':
False}})], className='six columns')], className='row', style={'margin':
'1% 3%'})])
<|reserved_special_token_1|>
import dash_html_components as html
import dash_core_components as dcc
layout = html.Div([
html.Div([
html.Div([
html.H6('Répartition des biens'),
dcc.Graph(
id = "pieGraph",
figure = {
"data": [{
"values": [2878001,2342181,1773296,521395],
"labels": [ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],
"name": "Biens",
"hoverinfo":"label+name+percent",
"hole": .7,
"type": "pie",
"marker": {'colors':['#3b7548','#ea1313','#ffd700','#FF00FF']}
}],
"layout": {
"width": "2000",
"annotations": [{
"font": {
"size": 20
},
"showarrow": False,
"text": "",
"x": 0.2,
"y": 0.2
}],
"showlegend": False
}
}
)
], className="six columns"),
html.Div([
html.H6('Effectif des biens'),
dcc.Graph(
id = "3",
figure ={
"data": [{
'x':[ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],
'y':[2878001,2342181,1773296,521395],
'name':'Bar biens',
'type':'bar',
'marker' :dict(color=['#3b7548','#ea1313','#ffd700','#FF00FF']),
}],
"layout": {
"xaxis" : dict(tickfont=dict(color='black')),
"yaxis" : dict(tickfont=dict(color='black')),
"width": "2000",
'yaxis':{
'title':'Nombre'
},
'xaxis':{
'title':'Type'
},
"annotations": [{
"font": {"size": 20},
"showarrow": False,
"text": "",
"x": 0.2,
"y": 0.2
}],
"showlegend": False
}
}
)
], className="six columns"),
], className="row", style={"margin": "1% 3%"})
])
|
flexible
|
{
"blob_id": "83c3193ea40c9328d16fb91774762a76352d8e09",
"index": 8417,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlayout = html.Div([html.Div([html.Div([html.H6('Répartition des biens'),\n dcc.Graph(id='pieGraph', figure={'data': [{'values': [2878001, 2342181,\n 1773296, 521395], 'labels': ['Maison', 'Appartement', 'Dependance',\n 'local_indistriel'], 'name': 'Biens', 'hoverinfo': 'label+name+percent',\n 'hole': 0.7, 'type': 'pie', 'marker': {'colors': ['#3b7548', '#ea1313',\n '#ffd700', '#FF00FF']}}], 'layout': {'width': '2000', 'annotations': [{\n 'font': {'size': 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': \n 0.2}], 'showlegend': False}})], className='six columns'), html.Div([\n html.H6('Effectif des biens'), dcc.Graph(id='3', figure={'data': [{'x':\n ['Maison', 'Appartement', 'Dependance', 'local_indistriel'], 'y': [\n 2878001, 2342181, 1773296, 521395], 'name': 'Bar biens', 'type': 'bar',\n 'marker': dict(color=['#3b7548', '#ea1313', '#ffd700', '#FF00FF'])}],\n 'layout': {'xaxis': dict(tickfont=dict(color='black')), 'yaxis': dict(\n tickfont=dict(color='black')), 'width': '2000', 'yaxis': {'title':\n 'Nombre'}, 'xaxis': {'title': 'Type'}, 'annotations': [{'font': {'size':\n 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': 0.2}], 'showlegend':\n False}})], className='six columns')], className='row', style={'margin':\n '1% 3%'})])\n",
"step-3": "import dash_html_components as html\nimport dash_core_components as dcc\nlayout = html.Div([html.Div([html.Div([html.H6('Répartition des biens'),\n dcc.Graph(id='pieGraph', figure={'data': [{'values': [2878001, 2342181,\n 1773296, 521395], 'labels': ['Maison', 'Appartement', 'Dependance',\n 'local_indistriel'], 'name': 'Biens', 'hoverinfo': 'label+name+percent',\n 'hole': 0.7, 'type': 'pie', 'marker': {'colors': ['#3b7548', '#ea1313',\n '#ffd700', '#FF00FF']}}], 'layout': {'width': '2000', 'annotations': [{\n 'font': {'size': 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': \n 0.2}], 'showlegend': False}})], className='six columns'), html.Div([\n html.H6('Effectif des biens'), dcc.Graph(id='3', figure={'data': [{'x':\n ['Maison', 'Appartement', 'Dependance', 'local_indistriel'], 'y': [\n 2878001, 2342181, 1773296, 521395], 'name': 'Bar biens', 'type': 'bar',\n 'marker': dict(color=['#3b7548', '#ea1313', '#ffd700', '#FF00FF'])}],\n 'layout': {'xaxis': dict(tickfont=dict(color='black')), 'yaxis': dict(\n tickfont=dict(color='black')), 'width': '2000', 'yaxis': {'title':\n 'Nombre'}, 'xaxis': {'title': 'Type'}, 'annotations': [{'font': {'size':\n 20}, 'showarrow': False, 'text': '', 'x': 0.2, 'y': 0.2}], 'showlegend':\n False}})], className='six columns')], className='row', style={'margin':\n '1% 3%'})])\n",
"step-4": "import dash_html_components as html\nimport dash_core_components as dcc\n\n\n\nlayout = html.Div([\n html.Div([\n html.Div([\n html.H6('Répartition des biens'),\n dcc.Graph(\n id = \"pieGraph\",\n figure = {\n \"data\": [{\n \"values\": [2878001,2342181,1773296,521395],\n \"labels\": [ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],\n \"name\": \"Biens\",\n \"hoverinfo\":\"label+name+percent\",\n \"hole\": .7,\n \"type\": \"pie\",\n \"marker\": {'colors':['#3b7548','#ea1313','#ffd700','#FF00FF']}\n }],\n \"layout\": {\n \"width\": \"2000\",\n \"annotations\": [{\n \"font\": {\n \"size\": 20\n },\n \"showarrow\": False,\n \"text\": \"\",\n \"x\": 0.2,\n \"y\": 0.2\n }],\n \"showlegend\": False \n }\n }\n )\n ], className=\"six columns\"),\n\n html.Div([\n html.H6('Effectif des biens'),\n\n dcc.Graph(\n id = \"3\",\n figure ={\n \"data\": [{\n 'x':[ 'Maison', 'Appartement', 'Dependance','local_indistriel' ],\n 'y':[2878001,2342181,1773296,521395],\n 'name':'Bar biens',\n 'type':'bar',\n 'marker' :dict(color=['#3b7548','#ea1313','#ffd700','#FF00FF']),\n }],\n \"layout\": {\n \"xaxis\" : dict(tickfont=dict(color='black')),\n \"yaxis\" : dict(tickfont=dict(color='black')),\n \"width\": \"2000\",\n 'yaxis':{\n 'title':'Nombre'\n },\n 'xaxis':{\n 'title':'Type'\n },\n \"annotations\": [{\n \"font\": {\"size\": 20},\n \"showarrow\": False,\n \"text\": \"\",\n \"x\": 0.2,\n \"y\": 0.2\n }],\n \"showlegend\": False \n }\n }\n )\n\n ], className=\"six columns\"),\n\n ], className=\"row\", style={\"margin\": \"1% 3%\"})\n])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
con = serial.Serial('/dev/tty****', 9600)
print('connected.')
while 1:
str = con.readline()
print(str.strip().decode('utf-8'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
con = serial.Serial('/dev/tty****', 9600)
print('connected.')
while 1:
str = con.readline()
print(str.strip().decode('utf-8'))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import serial
import time
def main():
con = serial.Serial('/dev/tty****', 9600)
print('connected.')
while 1:
str = con.readline()
print(str.strip().decode('utf-8'))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import serial
import time
def main():
    """Continuously read lines from a serial port and echo them decoded.

    Runs until interrupted (e.g. Ctrl-C); the port is always closed on exit.
    """
    # '/dev/tty****' is your port ID -- replace with the real device path.
    con = serial.Serial('/dev/tty****', 9600)
    print('connected.')
    try:
        # `while True` instead of `while 1` for clarity.
        while True:
            raw = con.readline()  # raw byte string, e.g. b'hello\r\n'
            # Renamed from `str`, which shadowed the builtin type.
            print(raw.strip().decode('utf-8'))
    finally:
        # Release the serial port even on Ctrl-C or a decode error;
        # the original leaked the open handle.
        con.close()


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "108c8bbb4d3dbc6b7f32e084b13009296b3c5a80",
"index": 8016,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import serial\nimport time\n\n\ndef main():\n con = serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str = con.readline()\n print(str.strip().decode('utf-8'))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import serial\nimport time\n\ndef main():\n # '/dev/tty****' is your port ID\n con=serial.Serial('/dev/tty****', 9600)\n print('connected.')\n while 1:\n str=con.readline() # byte code\n print (str.strip().decode('utf-8')) # decoded string\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
from pypsi.pipes import ThreadLocalStream
from pypsi.shell import Shell
from pypsi.core import pypsi_print
from nose.tools import *
class PypsiTestShell(Shell):
    """Minimal concrete Shell subclass used as a fixture by the bootstrap tests."""
    pass
class TestShellBootstrap(object):
    """Exercise Shell's takeover of the std streams / print and its rollback.

    Constructing a Shell is expected to replace sys.stdout/stderr/stdin with
    ThreadLocalStream wrappers and swap in pypsi_print; restore() must undo
    all of it.
    """

    # The three interpreter streams the shell is expected to wrap.
    _STREAMS = ('stdout', 'stderr', 'stdin')

    def setUp(self):
        # Snapshot the untouched interpreter objects first; constructing the
        # shell afterwards is what performs the bootstrap under test.
        self.real_stdout = sys.stdout
        self.real_stderr = sys.stderr
        self.real_stdin = sys.stdin
        self.real_print = print
        self.shell = PypsiTestShell()

    def tearDown(self):
        self.shell.restore()

    def test_bootstrap_streams(self):
        for stream_name in self._STREAMS:
            yield self._test_bootstrap_stream_type, stream_name
            yield self._test_bootstrap_stream_instance, stream_name

    def _test_bootstrap_stream_type(self, stream_name):
        # Each std stream should now be wrapped in a ThreadLocalStream.
        assert_is_instance(getattr(sys, stream_name), ThreadLocalStream)

    def _test_bootstrap_stream_instance(self, stream_name):
        # ...and the wrapper should still target the original stream object.
        original = getattr(self, 'real_' + stream_name)
        assert_equal(getattr(sys, stream_name)._get_target_stream(), original)

    def test_bootstrap_print(self):
        assert_equal(print, pypsi_print)

    def test_restore_print(self):
        self.shell.restore()
        assert_equal(print, self.real_print)

    def test_restore_streams(self):
        for stream_name in self._STREAMS:
            yield self._test_restore_stream_type, stream_name
            yield self._test_restore_stream_instance, stream_name

    def _test_restore_stream_type(self, stream_name):
        self.shell.restore()
        assert_not_is_instance(getattr(sys, stream_name), ThreadLocalStream)

    def _test_restore_stream_instance(self, stream_name):
        self.shell.restore()
        assert_equal(getattr(sys, stream_name),
                     getattr(self, 'real_' + stream_name))
|
normal
|
{
"blob_id": "1983340b3ce7ba8b631ba090871bea1ef7044943",
"index": 9333,
"step-1": "<mask token>\n\n\nclass TestShellBootstrap(object):\n <mask token>\n\n def tearDown(self):\n self.shell.restore()\n <mask token>\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n <mask token>\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n",
"step-2": "<mask token>\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n",
"step-3": "<mask token>\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n",
"step-4": "import sys\nfrom pypsi.pipes import ThreadLocalStream\nfrom pypsi.shell import Shell\nfrom pypsi.core import pypsi_print\nfrom nose.tools import *\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n",
"step-5": null,
"step-ids": [
7,
10,
12,
13
]
}
|
[
7,
10,
12,
13
] |
import scrapy
import datetime
from tzscrape.items import CitizenItem
class CitizenSpider(scrapy.Spider):
    """Crawl the thecitizen.co.tz front page and scrape the linked articles."""

    name = 'citizen'
    allowed_domains = ['thecitizen.co.tz']
    start_urls = ['http://www.thecitizen.co.tz/']

    def parse(self, response):
        """Follow every headline and teaser link through to parse_article."""
        # Front-page headlines.
        for href in response.xpath('//*[@itemprop="headline"]/a/@href'):
            yield scrapy.Request(response.urljoin(href.extract()),
                                 callback=self.parse_article)
        # Story teasers further down the page.
        for href in response.css('li.story-teaser').xpath('a/@href[1]'):
            yield scrapy.Request(response.urljoin(href.extract()),
                                 callback=self.parse_article)

    def parse_article(self, response):
        """Build a CitizenItem from an article page; yield None if no body."""
        body = response.xpath(
            '//div[@itemprop="articleBody"]/div/p//text()').extract()
        if not body:
            yield None
            return
        item = CitizenItem()
        item['body'] = body
        item['url'] = response.url
        item['publication'] = 'citizen'
        item['title'] = response.css('h1').xpath('text()').extract()
        item['byline'] = response.css('section.author').xpath('text()').extract()
        item['scraped_at'] = datetime.datetime.utcnow().isoformat()
        yield item
|
normal
|
{
"blob_id": "d307c3479e34a12971f62a765aca2ba0850d80d1",
"index": 5660,
"step-1": "<mask token>\n\n\nclass CitizenSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CitizenSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n for href in response.xpath('//*[@itemprop=\"headline\"]/a/@href'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n for href in response.css('li.story-teaser').xpath('a/@href[1]'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n\n def parse_article(self, response):\n item = CitizenItem()\n item['body'] = response.xpath(\n '//div[@itemprop=\"articleBody\"]/div/p//text()').extract()\n if not item['body']:\n yield None\n else:\n item['url'] = response.url\n item['publication'] = 'citizen'\n item['title'] = response.css('h1').xpath('text()').extract()\n item['byline'] = response.css('section.author').xpath('text()'\n ).extract()\n item['scraped_at'] = datetime.datetime.utcnow().isoformat()\n yield item\n",
"step-3": "<mask token>\n\n\nclass CitizenSpider(scrapy.Spider):\n name = 'citizen'\n allowed_domains = ['thecitizen.co.tz']\n start_urls = ['http://www.thecitizen.co.tz/']\n\n def parse(self, response):\n for href in response.xpath('//*[@itemprop=\"headline\"]/a/@href'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n for href in response.css('li.story-teaser').xpath('a/@href[1]'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n\n def parse_article(self, response):\n item = CitizenItem()\n item['body'] = response.xpath(\n '//div[@itemprop=\"articleBody\"]/div/p//text()').extract()\n if not item['body']:\n yield None\n else:\n item['url'] = response.url\n item['publication'] = 'citizen'\n item['title'] = response.css('h1').xpath('text()').extract()\n item['byline'] = response.css('section.author').xpath('text()'\n ).extract()\n item['scraped_at'] = datetime.datetime.utcnow().isoformat()\n yield item\n",
"step-4": "import scrapy\nimport datetime\nfrom tzscrape.items import CitizenItem\n\n\nclass CitizenSpider(scrapy.Spider):\n name = 'citizen'\n allowed_domains = ['thecitizen.co.tz']\n start_urls = ['http://www.thecitizen.co.tz/']\n\n def parse(self, response):\n for href in response.xpath('//*[@itemprop=\"headline\"]/a/@href'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n for href in response.css('li.story-teaser').xpath('a/@href[1]'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n\n def parse_article(self, response):\n item = CitizenItem()\n item['body'] = response.xpath(\n '//div[@itemprop=\"articleBody\"]/div/p//text()').extract()\n if not item['body']:\n yield None\n else:\n item['url'] = response.url\n item['publication'] = 'citizen'\n item['title'] = response.css('h1').xpath('text()').extract()\n item['byline'] = response.css('section.author').xpath('text()'\n ).extract()\n item['scraped_at'] = datetime.datetime.utcnow().isoformat()\n yield item\n",
"step-5": "import scrapy\nimport datetime\nfrom tzscrape.items import CitizenItem\n\nclass CitizenSpider(scrapy.Spider):\n name = 'citizen'\n allowed_domains = ['thecitizen.co.tz']\n start_urls = ['http://www.thecitizen.co.tz/']\n\n def parse(self, response):\n # headlines\n for href in response.xpath('//*[@itemprop=\"headline\"]/a/@href'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n\n #teasers\n for href in response.css('li.story-teaser').xpath('a/@href[1]'):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, callback=self.parse_article)\n\n\n def parse_article(self, response):\n item = CitizenItem()\n item['body'] = response.xpath('//div[@itemprop=\"articleBody\"]/div/p//text()').extract()\n\n if not item['body']:\n yield None\n else :\n item['url'] = response.url\n item['publication'] = 'citizen'\n item['title'] = response.css('h1').xpath('text()').extract()\n item['byline'] = response.css('section.author').xpath('text()').extract()\n item['scraped_at'] = datetime.datetime.utcnow().isoformat()\n yield item",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from peewee import BlobField
class BytesField(BlobField):
    """This is a BlobField adapted to our needs.

    Default BlobField returns memoryview when getting data from the db.
    We want bytes.
    """

    def adapt(self, value):
        """Convert a memoryview coming back from the DB into plain bytes.

        Any other value (bytes, None, ...) is passed through untouched.
        """
        # Convert every memoryview -- including an *empty* one, which the
        # previous `if value and isinstance(...)` truthiness check skipped,
        # leaking a memoryview to callers expecting bytes.
        if isinstance(value, memoryview):
            return value.tobytes()
        return value
|
normal
|
{
"blob_id": "b11869076c2c8d6207df861cd1d0b0434b3f9477",
"index": 9836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BytesField(BlobField):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BytesField(BlobField):\n <mask token>\n\n def adapt(self, value):\n if value and isinstance(value, memoryview):\n return value.tobytes()\n return value\n",
"step-4": "<mask token>\n\n\nclass BytesField(BlobField):\n \"\"\"This is a BlobField adapted to our needs\n Default BlobField returns memoryview when getting data from the db. We want bytes.\n \"\"\"\n\n def adapt(self, value):\n if value and isinstance(value, memoryview):\n return value.tobytes()\n return value\n",
"step-5": "from peewee import BlobField\n\n\nclass BytesField(BlobField):\n \"\"\"This is a BlobField adapted to our needs\n Default BlobField returns memoryview when getting data from the db. We want bytes.\n \"\"\"\n\n def adapt(self, value):\n if value and isinstance(value, memoryview):\n return value.tobytes()\n return value\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminReqNoDetails(Resource):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdminReqNoDetails(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('request_no', type=int, required=True, help=
'request_no cannot be left blank!')
data = parser.parse_args()
qstr = (
f" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; "
)
try:
return query(qstr)
except:
return {'message':
'There was an error connecting to the requests table while retrieving.'
}, 500
<|reserved_special_token_1|>
from flask_restful import Resource, reqparse
from db import query
import pymysql
from flask_jwt_extended import jwt_required
<|reserved_special_token_0|>
class AdminReqNoDetails(Resource):
@jwt_required
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('request_no', type=int, required=True, help=
'request_no cannot be left blank!')
data = parser.parse_args()
qstr = (
f" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; "
)
try:
return query(qstr)
except:
return {'message':
'There was an error connecting to the requests table while retrieving.'
}, 500
<|reserved_special_token_1|>
from flask_restful import Resource, reqparse
from db import query
import pymysql
from flask_jwt_extended import jwt_required
"""
This module is used to retrieve the data
for all the request_no's which have a false or a 0 select_status.
This is done by selecting distinct request_no's from requests table
for those rows where select_status = 0
"""
# This resource is for the admin to obtain all the rows in the requests table
# with a particular request_no
class AdminReqNoDetails(Resource):
    """Admin resource returning every row in ``requests`` for one request_no."""

    @jwt_required
    def get(self):
        """Return r_id, request_no and image for all rows matching request_no.

        Query args:
            request_no (int): required request number to look up.

        Returns:
            The rows from ``query`` on success, or an error message with
            HTTP 500 if the database access fails.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('request_no', type=int, required=True, help=
            'request_no cannot be left blank!')
        data = parser.parse_args()
        # type=int above guarantees request_no is an integer, so the f-string
        # interpolation below cannot inject SQL; still, switch to a
        # parameterized query if query() ever supports placeholders.
        qstr = (
            f" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; "
        )
        try:
            return query(qstr)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return {'message':
                'There was an error connecting to the requests table while retrieving.'
                }, 500
|
flexible
|
{
"blob_id": "d436362468b847e427bc14ca221cf0fe4b2623e3",
"index": 4408,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AdminReqNoDetails(Resource):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AdminReqNoDetails(Resource):\n\n @jwt_required\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('request_no', type=int, required=True, help=\n 'request_no cannot be left blank!')\n data = parser.parse_args()\n qstr = (\n f\" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; \"\n )\n try:\n return query(qstr)\n except:\n return {'message':\n 'There was an error connecting to the requests table while retrieving.'\n }, 500\n",
"step-4": "from flask_restful import Resource, reqparse\nfrom db import query\nimport pymysql\nfrom flask_jwt_extended import jwt_required\n<mask token>\n\n\nclass AdminReqNoDetails(Resource):\n\n @jwt_required\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('request_no', type=int, required=True, help=\n 'request_no cannot be left blank!')\n data = parser.parse_args()\n qstr = (\n f\" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; \"\n )\n try:\n return query(qstr)\n except:\n return {'message':\n 'There was an error connecting to the requests table while retrieving.'\n }, 500\n",
"step-5": "from flask_restful import Resource, reqparse\r\nfrom db import query\r\nimport pymysql\r\nfrom flask_jwt_extended import jwt_required\r\n\r\n\"\"\"\r\nThis module is used to retrieve the data \r\nfor all the request_no's which have a false or a 0 select_status.\r\nThis is done by selecting distinct request_no's from requests table \r\nfor those rows where select_status = 0\r\n\"\"\"\r\n\r\n# This resource is for the admin to obtain all the rows in the requests table \r\n# with a particular request_no\r\nclass AdminReqNoDetails(Resource):\r\n \r\n @jwt_required\r\n def get(self):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument('request_no', type=int, required=True, help=\"request_no cannot be left blank!\")\r\n data = parser.parse_args()\r\n #create query string\r\n qstr = f\"\"\" SELECT r_id,request_no,image FROM requests WHERE request_no = {data['request_no']}; \"\"\"\r\n try:\r\n return query(qstr)\r\n except:\r\n return {\r\n \"message\" : \"There was an error connecting to the requests table while retrieving.\"\r\n }, 500\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
    """Serialize/deserialize a binary tree as a preorder, comma-separated
    string. Empty children are encoded with the sentinel token 'X'.
    """

    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        if not root:
            return "X"
        # Preorder: value, left subtree, right subtree.
        return ",".join([str(root.val),
                         self.serialize(root.left),
                         self.serialize(root.right)])

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode

        Rewritten to consume a token iterator instead of repeatedly slicing
        the input string and stashing the remainder on ``self.data``: the old
        approach copied O(n) characters per node (O(n^2) total) and left
        shared mutable state on the instance, so interleaved calls on the
        same Codec could corrupt each other.
        """
        tokens = iter(data.split(","))

        def build():
            # Each call consumes exactly the tokens of one subtree, in the
            # same preorder that serialize() emitted them.
            val = next(tokens)
            if val == "X":
                return None
            node = TreeNode(int(val))
            node.left = build()
            node.right = build()
            return node

        return build()
# Your Codec object will be instantiated and called as such:
# ser = Codec()
# deser = Codec()
# ans = deser.deserialize(ser.serialize(root))
|
normal
|
{
"blob_id": "006e1088e72201fab7eebd1409c025b5dba69403",
"index": 5938,
"step-1": "<mask token>\n",
"step-2": "class Codec:\n <mask token>\n <mask token>\n",
"step-3": "class Codec:\n <mask token>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n if data[0] == 'X':\n return None\n else:\n t = TreeNode(int(self.data[:self.data.find(',')]))\n t.left = self.deserialize(self.data[self.data.find(',') + 1:])\n t.right = self.deserialize(self.data[self.data.find(',') + 1:])\n return t\n",
"step-4": "class Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root:\n return 'X'\n else:\n return ','.join([str(root.val), self.serialize(root.left), self\n .serialize(root.right)])\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n if data[0] == 'X':\n return None\n else:\n t = TreeNode(int(self.data[:self.data.find(',')]))\n t.left = self.deserialize(self.data[self.data.find(',') + 1:])\n t.right = self.deserialize(self.data[self.data.find(',') + 1:])\n return t\n",
"step-5": "# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n \n :type root: TreeNode\n :rtype: str\n \"\"\"\n if(not root) :\n return \"X\"\n else :\n return \",\".join([str(root.val), self.serialize(root.left), self.serialize(root.right)])\n \n \n# Q = [root]\n# res = []\n# while(Q) :\n# newQ = []\n# noChange = True\n# while(Q) :\n# v = Q.pop(0)\n# if(v == None) :\n# res.append(' ')\n# newQ.append(None)\n# newQ.append(None)\n# else :\n# res.append(str(v.val))\n \n# if(v.left == None) :\n# newQ.append(None)\n# else :\n# noChange = False\n# newQ.append(v.left) \n \n# if(v.right == None) :\n# newQ.append(None)\n# else :\n# noChange = False\n# newQ.append(v.right)\n\n \n# if(noChange) :\n# break\n# Q = newQ\n# return ','.join(res)\n \n \n \n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \n :type data: str\n :rtype: TreeNode\n \"\"\"\n self.data = data\n \n if(data[0] == \"X\") :\n return None\n else :\n t = TreeNode(int(self.data[: self.data.find(\",\")]))\n t.left = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n t.right = self.deserialize(self.data[self.data.find(\",\") + 1 :])\n return t\n \n \n \n# arr = data.split(\",\")\n \n# l = len(arr)\n \n# if(l == 0 or arr[0] == \" \") :\n# return None\n \n# t = TreeNode(int(arr[0]))\n \n# Q = [t]\n \n# half = (l + 1) / 2 - 1\n \n# i = 0\n \n \n# while(i < half) :\n# v = Q.pop(0)\n# if(v == None) :\n# i += 1\n# Q.append(None)\n# Q.append(None)\n# continue\n \n# if(arr[2 * i + 1] == ' ') :\n# v.left = None\n# Q.append(None)\n# else :\n# l = TreeNode(int(arr[2 * i + 1]))\n# v.left = l\n# Q.append(l)\n# if(arr[2 * i + 2] == ' ') :\n# v.right = None\n# Q.append(None)\n# else :\n# r = TreeNode(int(arr[2 * i + 2]))\n# v.right = r\n# Q.append(r)\n# i += 1\n# return t\n \n \n\n# Your Codec object will be 
instantiated and called as such:\n# ser = Codec()\n# deser = Codec()\n# ans = deser.deserialize(ser.serialize(root))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def get_all_lefts(word, substring):
    """Yield, for every way of embedding `substring` in `word` as a
    subsequence, the tuple of (index, prefix-before-match) pairs, with a
    final (len(rest), rest) entry for the unmatched tail.

    If the next required character is absent, the sentinel tuple (-1,) is
    yielded instead (possibly nested inside a longer candidate).
    """
    if not substring:
        # Nothing left to match: the whole remaining word is the tail.
        yield ((len(word), word),)
        return
    if substring[0] not in word:
        # Dead end: signal failure with the -1 sentinel.
        yield (-1,)
        return
    for idx, ch in enumerate(word):
        if ch == substring[0]:
            prefix = (idx, word[:idx])
            # Recurse on everything after this match.
            for tail in get_all_lefts(word[idx + 1:], substring[1:]):
                yield (prefix, *tail)
if __name__ == '__main__':
    # Read the word and the subsequence to embed from stdin.
    source_word = input('')
    target_sub = input('')
    maxNum = 0
    for candidate in get_all_lefts(source_word, target_sub):
        parts = list(candidate)
        # Candidates containing the -1 sentinel are failed embeddings.
        if -1 not in parts:
            print(parts)
    print(maxNum)
|
normal
|
{
"blob_id": "8c0377b70b902e6e61351869a4378b4c2c50a3a7",
"index": 2478,
"step-1": "<mask token>\n",
"step-2": "def get_all_lefts(word, substring):\n if len(substring) == 0:\n yield (len(word), word),\n elif substring[0] not in word:\n yield -1,\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):\n yield (i, word[:i]), *sub_sequance\n\n\n<mask token>\n",
"step-3": "def get_all_lefts(word, substring):\n if len(substring) == 0:\n yield (len(word), word),\n elif substring[0] not in word:\n yield -1,\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):\n yield (i, word[:i]), *sub_sequance\n\n\nif __name__ == '__main__':\n word = input('')\n substring = input('')\n maxNum = 0\n for lefts in map(list, get_all_lefts(word, substring)):\n if -1 in lefts:\n continue\n print(lefts)\n print(maxNum)\n",
"step-4": "def get_all_lefts(word,substring):\n if len(substring) == 0:\n yield ((len(word),word),)\n else:\n if substring[0] not in word:\n yield (-1,)\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i+1:],substring[1:]):\n yield ((i,word[:i]),*sub_sequance)\n\nif __name__ == '__main__':\n word = input('')\n substring = input('')\n maxNum = 0\n for lefts in map(list,get_all_lefts(word,substring)):\n if -1 in lefts:\n continue\n print(lefts)\n print(maxNum)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from processing.DLDataEngineering import DLDataEngineering
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import numpy as np
import h5py
import os
from scipy.ndimage import gaussian_filter
#Deep learning packages
import tensorflow as tf
#from tensorflow import keras
from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply
from tensorflow.keras.backend import max
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow import keras
from sklearn.metrics import f1_score,roc_auc_score
import matplotlib.pyplot as plt
import cartopy.feature as cf
import cartopy.crs as ccrs
import cartopy
from keras_unet_collection import models, base, utils
class DLModeler(object):
def __init__(self,model_path,hf_path,num_examples,
class_percentages,predictors,model_args,
model_type):
self.model_path = model_path
self.hf_path = hf_path
self.num_examples = num_examples
self.class_percentages = class_percentages
self.model_args = model_args
self.model_type = model_type
long_predictors = []
#Shorten predictor names
for predictor in predictors:
if "_" in predictor:
predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]
elif " " in predictor:
predictor_name = ''.join([v[0].upper() for v in predictor.split()])
else: predictor_name = predictor
long_predictors.append(predictor_name)
self.predictors = np.array(long_predictors)
#Class to read data and standardize
self.dldataeng = DLDataEngineering(self.model_path,self.hf_path,
self.num_examples,self.class_percentages,self.predictors,
self.model_args)
return
def train_models(self,member,train_dates,valid_dates):
"""
Function that reads and extracts pre-processed 2d member data
from an ensemble to train a convolutional neural net (cnn) or
UNET.
The model data is standardized before being input to the cnn,
with the observation data in the shape (# examples, # classes).
Args:
member (str): ensemble member data that trains a DL model
"""
train_data, train_label = self.dldataeng.extract_training_data(member,
train_dates,self.model_type)
#valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)
valid_data, valid_label = [],[]
if self.model_type == 'CNN':
onehot_encoder = OneHotEncoder(sparse=False,categories='auto')
encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))
self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)
elif 'UNET' in self.model_type:
#train_label[train_label >= 50.] = 50.
#log_train_label = np.log((train_label+1.0))
self.train_UNET(member,train_data,train_label,valid_data,valid_label)
return
def train_UNET(self,member,trainX,trainY,validX,validY):
model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
'''
if os.path.exists(model_file):
del trainX,trainY,validX,validY
unet = tf.keras.models.load_model(model_file,compile=False)
print(f'\nOpening {model_file}\n')
#self.validate_UNET(model,validX,validY,threshold_file)
return
'''
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
#print('Validation data shape {0}'.format(np.shape(validX)))
#print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1,
'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU',
'output_activation':'ReLU', 'batch_norm':False, 'pool':True,
'unpool':False, 'name':f'{self.model_type}'}
if self.model_type == 'UNET':
model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]
unet_model_obj = models.unet_2d
compile_params = {'loss': 'mean_squared_error'}
else:
compile_params = {'loss': ['mean_squared_error',
'mean_squared_error','mean_squared_error',
'mean_squared_error','mean_squared_error'],
'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}
if self.model_type == 'UNET2plus':
plus_model_params = {'filter_num':[16, 32, 64, 128, 256],
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_plus_2d
elif self.model_type == 'UNET3plus':
plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],
'filter_num_skip':'auto', 'filter_num_aggregate':'auto',
'deep_supervision':True}
model_obj_params.update(plus_model_params)
unet_model_obj = models.unet_3plus_2d
try: unet_model = unet_model_obj(**model_obj_params)
except:
print(f"{self.model_type} Model type not found.")
return
unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))
print(unet_model.summary())
#Augment data
aug = ImageDataGenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
#Fit UNET
n_epochs = 15
bs = 256
conv_hist = unet_model.fit(
aug.flow(trainX,trainY,batch_size=bs),
steps_per_epoch=len(trainX)/bs,
epochs=n_epochs,verbose=1)
'''
pred_s = trainX[0].reshape(1,input_shape[0],
input_shape[1],input_shape[2])
prediction = unet.predict(pred_s)[0,:,:,:]
print(prediction.shape)
plt.imshow(prediction)
plt.colorbar()
plt.show()
return
'''
#Save trained model
unet_model.save(model_file)
print(f'Writing out {model_file}')
#Clear graphs
tf.keras.backend.clear_session()
#self.validate_UNET(model,validX,validY,threshold_file)
return
def train_CNN(self,member,input_data):
"""
Function to train a convolutional neural net (CNN) for random
training data and associated labels.
Args:
member (str): Ensemble member
trainX (tuple): Tuple of (train data, train labels,
validation data, validation labels)
"""
trainX,trainY,validX,validY = input_data
print('\nTraining {0} models'.format(member))
print('Training data shape {0}'.format(np.shape(trainX)))
print('Training label data shape {0}\n'.format(np.shape(trainY)))
print('Validation data shape {0}'.format(np.shape(validX)))
print('Validation label data shape {0}\n'.format(np.shape(validY)))
model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'
print(model_file)
if not os.path.exists(model_file):
# Clear graphs
tf.keras.backend.clear_session()
#Initiliaze Convolutional Neural Net (CNN)
model = models.Sequential()
input_shape = np.shape(trainX[0])
#First layer: input shape (y,x,# variables)
#Add noise
model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))
for filters in [32,64,128]:
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.Conv2D(filters, (3,3),padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.MaxPooling2D())
#Flatten the last convolutional layer
model.add(layers.Flatten())
model.add(layers.Dense(256))
model.add(layers.LeakyReLU(alpha=0.3))
model.add(layers.Dense(4,activation='softmax'))
#Compile neural net
model.compile(optimizer='adam',loss='categorical_crossentropy',
metrics=[tf.keras.metrics.AUC()])
print(model.summary())
#fit neural net
n_epochs = 10
bs = 256
#augment data
aug = imagedatagenerator(
rotation_range=10,zoom_range=0.15,
width_shift_range=0.2,height_shift_range=0.2,
fill_mode="nearest")
train_generator = aug.flow(trainx,trainy,batch_size=bs)
conv_hist = model.fit(
train_generator,steps_per_epoch=len(trainx) // bs,
epochs=n_epochs,verbose=1,class_weight=self.class_percentages)
#save trained model
model.save(model_file)
print(f'Writing out {model_file}')
else:
model = tf.keras.models.load_model(model_file)
print(f'\nOpening {model_file}\n')
del trainY,trainX
threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
if os.path.exists(threshold_file):
del validX,validY
return
self.validate_CNN(model,validX,validY,threshold_file)
return
def validate_CNN(self,model,validX,validY,threshold_file):
print()
#Predict on validation data
cnn_preds = model.predict(validX)
sev_hail = cnn_preds[:,2]
sig_hail = cnn_preds[:,3]
#combine the severe hail and sig severe hail classes
sev_prob_preds = sev_hail+sig_hail
print('Max probability',np.nanmax(sev_prob_preds))
#classify labels as severe hail or no hail
true_preds = np.where(validY >= 2, 1, 0)
del validX, validY
df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])
#Find threshold with the highest validation AUC score
auc_score = []
thresholds = np.arange(0.1,1.01,0.02)
for t in thresholds:
threshold_preds = np.where(sev_prob_preds >= t,1,0)
auc_score.append(roc_auc_score(true_preds, threshold_preds))
print(auc_score)
#output threshold with highest AUC
df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
print(df_best_score)
df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}')
return
    def predict_model(self,member,patch_map_conversion_indices,
        total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):
        lon_grid,lat_grid):
        """
        Open a pre-trained DL model (CNN or UNET family) and produce gridded
        hourly hail probability forecasts for a single ensemble member,
        written as an HDF5 file under forecast_grid_path.

        Args:
            member (str): Ensemble member name (selects the saved model file).
            patch_map_conversion_indices: Per-patch flat indices that map each
                trimmed patch back onto the full output grid.
            total_map_shape (tuple): (ny, nx) of the full output grid.
            subset_map_shape: Patch-painting shape used by the CNN branch.
            date: Sequence whose first element is the forecast date string.
            patch_radius (int): Patch size used by the CNN branch.
            forecast_grid_path (str): Root directory for gridded output.
            lon_grid, lat_grid: Unused in this method; kept for caller
                compatibility (note the stray '#):' remnant in the signature
                above suggests they were appended later).

        Right now only includes severe hail prediction, not sig-severe.
        """
        ##################
        # Load in any saved DL model files
        ##################
        #Clear any saved DL graphs
        tf.keras.backend.clear_session()
        #Load DL model
        model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'
        DL_model = tf.keras.models.load_model(model_file,compile=False)
        if self.model_type == 'CNN':
            #Use minimum prob threshold chosen with validation data
            threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'
            if not os.path.exists(threshold_file):
                print('No thresholds found')
                return
            # NOTE(review): threshold hard-coded to 0; the CSV read that
            # should supply it is commented out — confirm before relying on it.
            prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05
            print(prob_thresh)
        total_count = 0
        ##################
        #Extract forecast data (#hours, #patches, nx, ny, #variables)
        ##################
        forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])
        if forecast_data is None:
            print('No forecast data found')
            return
        ##################
        # Standardize hourly data
        ##################
        # Standardize each hour independently with the member's saved scaling
        standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour])
            for hour in np.arange(forecast_data.shape[0])])
        del forecast_data
        ##################
        # Produce gridded hourly hail forecast
        ##################
        # Flat (hours, ny*nx) grid; NaN wherever no patch supplies a value
        total_grid = np.empty( (standard_forecast_data.shape[0],
            total_map_shape[0]*total_map_shape[1]) )*np.nan
        for hour in np.arange(standard_forecast_data.shape[0]):
            print(hour)
            #Predict probability of severe hail
            DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))
            ######
            # Will need to fix CNN code to reflect the conversion inds are in
            #patches x (patch_radius*patch_radius) instead of (patches*radius*radius)
            #####
            if self.model_type == 'CNN':
                # NOTE(review): `cnn_preds` and `map_conversion_inds` are not
                # defined anywhere in this method, so this branch raises a
                # NameError if exercised (presumably DL_prediction and
                # patch_map_conversion_indices were intended). Known-broken;
                # see the author's fixme note above.
                severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]
                severe_patches = np.zeros(subset_map_shape)
                #If no hourly severe hail predicted, continue
                if len(severe_proba_indices) <1 : continue
                severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)
                total_grid[hour,map_conversion_inds] = severe_patches.ravel()
                print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))
                total_count += len(severe_proba_indices)
                print('Total severe probs:',total_count)
                print()
            elif 'UNET' in self.model_type:
                for patch in np.arange(standard_forecast_data.shape[1]):
                    patch_indices = patch_map_conversion_indices[patch]
                    #Gets rid of overlapping edges
                    overlap_pt = 4
                    # If unet3+ then the last output tensor is the correct one
                    if DL_prediction.ndim > 4:
                        hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,
                            overlap_pt:-overlap_pt,0].ravel()
                    else:
                        hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,
                            overlap_pt:-overlap_pt,0].ravel()
                    total_grid[hour,patch_indices] = hourly_patch_data
            del DL_prediction
        del standard_forecast_data
        output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)
        # Output directory keyed by date with the last 5 characters stripped
        # (assumes a fixed-width date string — TODO confirm format)
        date_outpath = forecast_grid_path + f'{date[0][:-5]}/'
        #Output gridded forecasts
        if not os.path.exists(date_outpath): os.makedirs(date_outpath)
        gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'
        print(f'Writing out {gridded_out_file}')
        with h5py.File(gridded_out_file, 'w') as hf:
            hf.create_dataset("data",data=output_data,
                compression='gzip',compression_opts=6)
        return
def dice_loss(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.math.sigmoid(y_pred)
numerator = 2 * tf.reduce_sum(y_true * y_pred)
denominator = tf.reduce_sum(y_true + y_pred)
return 1 - numerator / denominator
'''
From: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/
'''
def down_block(x, filters, kernel_size=(3, 3)):
c = layers.Conv2D(filters, kernel_size, padding='same')(x)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
p = layers.MaxPooling2D((2,2))(c)
return c, p
def up_block(x, skip, filters, kernel_size=(3, 3)):
up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)
concat = layers.Concatenate()([up, skip])
c = layers.Conv2D(filters, kernel_size, padding='same')(concat)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
c = layers.Conv2D(filters, kernel_size, padding='same')(c)
c = layers.LeakyReLU(alpha=0.2)(c)
c = layers.BatchNormalization()(c)
return c
def bottleneck(x, filters, kernel_size=(3, 3)):
    """Bottleneck at the bottom of the U-Net: two Conv2D/LeakyReLU/BatchNorm
    layers with no pooling.

    Args:
        x: Input feature map from the deepest encoder stage.
        filters: Number of convolution filters for both conv layers.
        kernel_size: Convolution kernel size (default 3x3).

    Returns:
        The transformed feature map at unchanged spatial resolution.
    """
    features = x
    for _ in range(2):
        features = layers.Conv2D(filters, kernel_size, padding='same')(features)
        features = layers.LeakyReLU(alpha=0.2)(features)
        features = layers.BatchNormalization()(features)
    return features
|
normal
|
{
"blob_id": "a0a6bd5de39a7599f7872639cdf3a59b8cda5498",
"index": 5230,
"step-1": "<mask token>\n\n\nclass DLModeler(object):\n\n def __init__(self, model_path, hf_path, num_examples, class_percentages,\n predictors, model_args, model_type):\n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args\n self.model_type = model_type\n long_predictors = []\n for predictor in predictors:\n if '_' in predictor:\n predictor_name = predictor.split('_')[0].upper(\n ) + predictor.split('_')[-1]\n elif ' ' in predictor:\n predictor_name = ''.join([v[0].upper() for v in predictor.\n split()])\n else:\n predictor_name = predictor\n long_predictors.append(predictor_name)\n self.predictors = np.array(long_predictors)\n self.dldataeng = DLDataEngineering(self.model_path, self.hf_path,\n self.num_examples, self.class_percentages, self.predictors,\n self.model_args)\n return\n\n def train_models(self, member, train_dates, valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. \n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). 
\n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates, self.model_type)\n valid_data, valid_label = [], []\n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False, categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.\n reshape(-1, 1))\n self.train_CNN(member, train_data, encoded_label, valid_data,\n valid_label)\n elif 'UNET' in self.model_type:\n self.train_UNET(member, train_data, train_label, valid_data,\n valid_label)\n return\n\n def train_UNET(self, member, trainX, trainY, validX, validY):\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n \"\"\"\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\nOpening {model_file}\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \"\"\"\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': \n 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation':\n 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False,\n 'pool': True, 'unpool': False, 'name': f'{self.model_type}'}\n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]\n unet_model_obj = models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error', 'mean_squared_error',\n 'mean_squared_error', 'mean_squared_error'], 'loss_weights':\n [0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus':\n plus_model_params = {'filter_num': [16, 32, 64, 128, 256],\n 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = 
models.unet_plus_2d\n elif self.model_type == 'UNET3plus':\n plus_model_params = {'filter_num_downi': [16, 32, 64, 128, \n 256], 'filter_num_skip': 'auto', 'filter_num_aggregate':\n 'auto', 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n try:\n unet_model = unet_model_obj(**model_obj_params)\n except:\n print(f'{self.model_type} Model type not found.')\n return\n unet_model.compile(**compile_params, optimizer=tf.keras.optimizers.\n Adam(lr=0.0001))\n print(unet_model.summary())\n aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')\n n_epochs = 15\n bs = 256\n conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),\n steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)\n \"\"\"\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n \"\"\"\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n tf.keras.backend.clear_session()\n return\n\n def train_CNN(self, member, input_data):\n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX (tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX, trainY, validX, validY = input_data\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model.h5')\n print(model_file)\n if not os.path.exists(model_file):\n 
tf.keras.backend.clear_session()\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n model.add(layers.GaussianNoise(0.01, input_shape=input_shape))\n for filters in [32, 64, 128]:\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4, activation='softmax'))\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n n_epochs = 10\n bs = 256\n aug = imagedatagenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode=\n 'nearest')\n train_generator = aug.flow(trainx, trainy, batch_size=bs)\n conv_hist = model.fit(train_generator, steps_per_epoch=len(\n trainx) // bs, epochs=n_epochs, verbose=1, class_weight=\n self.class_percentages)\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n del trainY, trainX\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if os.path.exists(threshold_file):\n del validX, validY\n return\n self.validate_CNN(model, validX, validY, threshold_file)\n return\n\n def validate_CNN(self, model, validX, validY, threshold_file):\n print()\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:, 2]\n sig_hail = cnn_preds[:, 3]\n sev_prob_preds = sev_hail + sig_hail\n print('Max probability', np.nanmax(sev_prob_preds))\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[\n 'Size Threshold'])\n auc_score = []\n thresholds = np.arange(0.1, 1.01, 0.02)\n for t in thresholds:\n 
threshold_preds = np.where(sev_prob_preds >= t, 1, 0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n print(auc_score)\n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return\n\n def predict_model(self, member, patch_map_conversion_indices,\n total_map_shape, subset_map_shape, date, patch_radius,\n forecast_grid_path, lon_grid, lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). \n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n tf.keras.backend.clear_session()\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n DL_model = tf.keras.models.load_model(model_file, compile=False)\n if self.model_type == 'CNN':\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return\n prob_thresh = 0\n print(prob_thresh)\n total_count = 0\n forecast_data = self.dldataeng.read_files('forecast', member, date,\n [None], [None])\n if forecast_data is None:\n print('No forecast data found')\n return\n standard_forecast_data = np.array([self.dldataeng.standardize_data(\n member, forecast_data[hour]) for hour in np.arange(\n forecast_data.shape[0])])\n del forecast_data\n total_grid = np.empty((standard_forecast_data.shape[0], \n total_map_shape[0] * total_map_shape[1])) * np.nan\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n DL_prediction = np.array(DL_model.predict(\n standard_forecast_data[hour]))\n if self.model_type == 'CNN':\n severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds\n [:, 3] >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n if len(severe_proba_indices) < 1:\n continue\n 
severe_patches[severe_proba_indices] = np.full((\n patch_radius, patch_radius), 1)\n total_grid[hour, map_conversion_inds] = severe_patches.ravel()\n print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[\n :, 2] + cnn_preds[:, 3]))\n total_count += len(severe_proba_indices)\n print('Total severe probs:', total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n overlap_pt = 4\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1, patch,\n overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0\n ].ravel()\n else:\n hourly_patch_data = DL_prediction[patch, overlap_pt\n :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel()\n total_grid[hour, patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data = total_grid.reshape((total_grid.shape[0],) +\n total_map_shape)\n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n gridded_out_file = (date_outpath +\n f'{member}_{date[0]}_forecast_grid.h5')\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf:\n hf.create_dataset('data', data=output_data, compression='gzip',\n compression_opts=6)\n return\n\n\n<mask token>\n\n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2, 2))(c)\n return c, p\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DLModeler(object):\n\n def __init__(self, model_path, hf_path, num_examples, class_percentages,\n predictors, model_args, model_type):\n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args\n self.model_type = model_type\n long_predictors = []\n for predictor in predictors:\n if '_' in predictor:\n predictor_name = predictor.split('_')[0].upper(\n ) + predictor.split('_')[-1]\n elif ' ' in predictor:\n predictor_name = ''.join([v[0].upper() for v in predictor.\n split()])\n else:\n predictor_name = predictor\n long_predictors.append(predictor_name)\n self.predictors = np.array(long_predictors)\n self.dldataeng = DLDataEngineering(self.model_path, self.hf_path,\n self.num_examples, self.class_percentages, self.predictors,\n self.model_args)\n return\n\n def train_models(self, member, train_dates, valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. \n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). 
\n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates, self.model_type)\n valid_data, valid_label = [], []\n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False, categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.\n reshape(-1, 1))\n self.train_CNN(member, train_data, encoded_label, valid_data,\n valid_label)\n elif 'UNET' in self.model_type:\n self.train_UNET(member, train_data, train_label, valid_data,\n valid_label)\n return\n\n def train_UNET(self, member, trainX, trainY, validX, validY):\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n \"\"\"\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\nOpening {model_file}\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \"\"\"\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': \n 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation':\n 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False,\n 'pool': True, 'unpool': False, 'name': f'{self.model_type}'}\n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]\n unet_model_obj = models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error', 'mean_squared_error',\n 'mean_squared_error', 'mean_squared_error'], 'loss_weights':\n [0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus':\n plus_model_params = {'filter_num': [16, 32, 64, 128, 256],\n 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = 
models.unet_plus_2d\n elif self.model_type == 'UNET3plus':\n plus_model_params = {'filter_num_downi': [16, 32, 64, 128, \n 256], 'filter_num_skip': 'auto', 'filter_num_aggregate':\n 'auto', 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n try:\n unet_model = unet_model_obj(**model_obj_params)\n except:\n print(f'{self.model_type} Model type not found.')\n return\n unet_model.compile(**compile_params, optimizer=tf.keras.optimizers.\n Adam(lr=0.0001))\n print(unet_model.summary())\n aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')\n n_epochs = 15\n bs = 256\n conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),\n steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)\n \"\"\"\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n \"\"\"\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n tf.keras.backend.clear_session()\n return\n\n def train_CNN(self, member, input_data):\n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX (tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX, trainY, validX, validY = input_data\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model.h5')\n print(model_file)\n if not os.path.exists(model_file):\n 
tf.keras.backend.clear_session()\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n model.add(layers.GaussianNoise(0.01, input_shape=input_shape))\n for filters in [32, 64, 128]:\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4, activation='softmax'))\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n n_epochs = 10\n bs = 256\n aug = imagedatagenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode=\n 'nearest')\n train_generator = aug.flow(trainx, trainy, batch_size=bs)\n conv_hist = model.fit(train_generator, steps_per_epoch=len(\n trainx) // bs, epochs=n_epochs, verbose=1, class_weight=\n self.class_percentages)\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n del trainY, trainX\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if os.path.exists(threshold_file):\n del validX, validY\n return\n self.validate_CNN(model, validX, validY, threshold_file)\n return\n\n def validate_CNN(self, model, validX, validY, threshold_file):\n print()\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:, 2]\n sig_hail = cnn_preds[:, 3]\n sev_prob_preds = sev_hail + sig_hail\n print('Max probability', np.nanmax(sev_prob_preds))\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[\n 'Size Threshold'])\n auc_score = []\n thresholds = np.arange(0.1, 1.01, 0.02)\n for t in thresholds:\n 
threshold_preds = np.where(sev_prob_preds >= t, 1, 0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n print(auc_score)\n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return\n\n def predict_model(self, member, patch_map_conversion_indices,\n total_map_shape, subset_map_shape, date, patch_radius,\n forecast_grid_path, lon_grid, lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). \n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n tf.keras.backend.clear_session()\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n DL_model = tf.keras.models.load_model(model_file, compile=False)\n if self.model_type == 'CNN':\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return\n prob_thresh = 0\n print(prob_thresh)\n total_count = 0\n forecast_data = self.dldataeng.read_files('forecast', member, date,\n [None], [None])\n if forecast_data is None:\n print('No forecast data found')\n return\n standard_forecast_data = np.array([self.dldataeng.standardize_data(\n member, forecast_data[hour]) for hour in np.arange(\n forecast_data.shape[0])])\n del forecast_data\n total_grid = np.empty((standard_forecast_data.shape[0], \n total_map_shape[0] * total_map_shape[1])) * np.nan\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n DL_prediction = np.array(DL_model.predict(\n standard_forecast_data[hour]))\n if self.model_type == 'CNN':\n severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds\n [:, 3] >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n if len(severe_proba_indices) < 1:\n continue\n 
severe_patches[severe_proba_indices] = np.full((\n patch_radius, patch_radius), 1)\n total_grid[hour, map_conversion_inds] = severe_patches.ravel()\n print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[\n :, 2] + cnn_preds[:, 3]))\n total_count += len(severe_proba_indices)\n print('Total severe probs:', total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n overlap_pt = 4\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1, patch,\n overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0\n ].ravel()\n else:\n hourly_patch_data = DL_prediction[patch, overlap_pt\n :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel()\n total_grid[hour, patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data = total_grid.reshape((total_grid.shape[0],) +\n total_map_shape)\n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n gridded_out_file = (date_outpath +\n f'{member}_{date[0]}_forecast_grid.h5')\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf:\n hf.create_dataset('data', data=output_data, compression='gzip',\n compression_opts=6)\n return\n\n\n<mask token>\n\n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2, 2))(c)\n return c, p\n\n\n<mask token>\n\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = 
layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n",
"step-3": "<mask token>\n\n\nclass DLModeler(object):\n\n def __init__(self, model_path, hf_path, num_examples, class_percentages,\n predictors, model_args, model_type):\n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args\n self.model_type = model_type\n long_predictors = []\n for predictor in predictors:\n if '_' in predictor:\n predictor_name = predictor.split('_')[0].upper(\n ) + predictor.split('_')[-1]\n elif ' ' in predictor:\n predictor_name = ''.join([v[0].upper() for v in predictor.\n split()])\n else:\n predictor_name = predictor\n long_predictors.append(predictor_name)\n self.predictors = np.array(long_predictors)\n self.dldataeng = DLDataEngineering(self.model_path, self.hf_path,\n self.num_examples, self.class_percentages, self.predictors,\n self.model_args)\n return\n\n def train_models(self, member, train_dates, valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. \n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). 
\n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates, self.model_type)\n valid_data, valid_label = [], []\n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False, categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.\n reshape(-1, 1))\n self.train_CNN(member, train_data, encoded_label, valid_data,\n valid_label)\n elif 'UNET' in self.model_type:\n self.train_UNET(member, train_data, train_label, valid_data,\n valid_label)\n return\n\n def train_UNET(self, member, trainX, trainY, validX, validY):\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n \"\"\"\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\nOpening {model_file}\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \"\"\"\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': \n 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation':\n 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False,\n 'pool': True, 'unpool': False, 'name': f'{self.model_type}'}\n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]\n unet_model_obj = models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error', 'mean_squared_error',\n 'mean_squared_error', 'mean_squared_error'], 'loss_weights':\n [0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus':\n plus_model_params = {'filter_num': [16, 32, 64, 128, 256],\n 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = 
models.unet_plus_2d\n elif self.model_type == 'UNET3plus':\n plus_model_params = {'filter_num_downi': [16, 32, 64, 128, \n 256], 'filter_num_skip': 'auto', 'filter_num_aggregate':\n 'auto', 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n try:\n unet_model = unet_model_obj(**model_obj_params)\n except:\n print(f'{self.model_type} Model type not found.')\n return\n unet_model.compile(**compile_params, optimizer=tf.keras.optimizers.\n Adam(lr=0.0001))\n print(unet_model.summary())\n aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')\n n_epochs = 15\n bs = 256\n conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),\n steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)\n \"\"\"\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n \"\"\"\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n tf.keras.backend.clear_session()\n return\n\n def train_CNN(self, member, input_data):\n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX (tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX, trainY, validX, validY = input_data\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model.h5')\n print(model_file)\n if not os.path.exists(model_file):\n 
tf.keras.backend.clear_session()\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n model.add(layers.GaussianNoise(0.01, input_shape=input_shape))\n for filters in [32, 64, 128]:\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4, activation='softmax'))\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n n_epochs = 10\n bs = 256\n aug = imagedatagenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode=\n 'nearest')\n train_generator = aug.flow(trainx, trainy, batch_size=bs)\n conv_hist = model.fit(train_generator, steps_per_epoch=len(\n trainx) // bs, epochs=n_epochs, verbose=1, class_weight=\n self.class_percentages)\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n del trainY, trainX\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if os.path.exists(threshold_file):\n del validX, validY\n return\n self.validate_CNN(model, validX, validY, threshold_file)\n return\n\n def validate_CNN(self, model, validX, validY, threshold_file):\n print()\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:, 2]\n sig_hail = cnn_preds[:, 3]\n sev_prob_preds = sev_hail + sig_hail\n print('Max probability', np.nanmax(sev_prob_preds))\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[\n 'Size Threshold'])\n auc_score = []\n thresholds = np.arange(0.1, 1.01, 0.02)\n for t in thresholds:\n 
threshold_preds = np.where(sev_prob_preds >= t, 1, 0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n print(auc_score)\n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return\n\n def predict_model(self, member, patch_map_conversion_indices,\n total_map_shape, subset_map_shape, date, patch_radius,\n forecast_grid_path, lon_grid, lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). \n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n tf.keras.backend.clear_session()\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n DL_model = tf.keras.models.load_model(model_file, compile=False)\n if self.model_type == 'CNN':\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return\n prob_thresh = 0\n print(prob_thresh)\n total_count = 0\n forecast_data = self.dldataeng.read_files('forecast', member, date,\n [None], [None])\n if forecast_data is None:\n print('No forecast data found')\n return\n standard_forecast_data = np.array([self.dldataeng.standardize_data(\n member, forecast_data[hour]) for hour in np.arange(\n forecast_data.shape[0])])\n del forecast_data\n total_grid = np.empty((standard_forecast_data.shape[0], \n total_map_shape[0] * total_map_shape[1])) * np.nan\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n DL_prediction = np.array(DL_model.predict(\n standard_forecast_data[hour]))\n if self.model_type == 'CNN':\n severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds\n [:, 3] >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n if len(severe_proba_indices) < 1:\n continue\n 
severe_patches[severe_proba_indices] = np.full((\n patch_radius, patch_radius), 1)\n total_grid[hour, map_conversion_inds] = severe_patches.ravel()\n print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[\n :, 2] + cnn_preds[:, 3]))\n total_count += len(severe_proba_indices)\n print('Total severe probs:', total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n overlap_pt = 4\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1, patch,\n overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0\n ].ravel()\n else:\n hourly_patch_data = DL_prediction[patch, overlap_pt\n :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel()\n total_grid[hour, patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data = total_grid.reshape((total_grid.shape[0],) +\n total_map_shape)\n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n gridded_out_file = (date_outpath +\n f'{member}_{date[0]}_forecast_grid.h5')\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf:\n hf.create_dataset('data', data=output_data, compression='gzip',\n compression_opts=6)\n return\n\n\n<mask token>\n\n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2, 2))(c)\n return c, p\n\n\ndef up_block(x, skip, filters, kernel_size=(3, 3)):\n up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)\n concat = layers.Concatenate()([up, skip])\n c = layers.Conv2D(filters, kernel_size, padding='same')(concat)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = 
layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n",
"step-4": "from processing.DLDataEngineering import DLDataEngineering\nfrom sklearn.preprocessing import OneHotEncoder\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport os\nfrom scipy.ndimage import gaussian_filter\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply\nfrom tensorflow.keras.backend import max\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import f1_score, roc_auc_score\nimport matplotlib.pyplot as plt\nimport cartopy.feature as cf\nimport cartopy.crs as ccrs\nimport cartopy\nfrom keras_unet_collection import models, base, utils\n\n\nclass DLModeler(object):\n\n def __init__(self, model_path, hf_path, num_examples, class_percentages,\n predictors, model_args, model_type):\n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args\n self.model_type = model_type\n long_predictors = []\n for predictor in predictors:\n if '_' in predictor:\n predictor_name = predictor.split('_')[0].upper(\n ) + predictor.split('_')[-1]\n elif ' ' in predictor:\n predictor_name = ''.join([v[0].upper() for v in predictor.\n split()])\n else:\n predictor_name = predictor\n long_predictors.append(predictor_name)\n self.predictors = np.array(long_predictors)\n self.dldataeng = DLDataEngineering(self.model_path, self.hf_path,\n self.num_examples, self.class_percentages, self.predictors,\n self.model_args)\n return\n\n def train_models(self, member, train_dates, valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. \n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). 
\n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates, self.model_type)\n valid_data, valid_label = [], []\n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False, categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.\n reshape(-1, 1))\n self.train_CNN(member, train_data, encoded_label, valid_data,\n valid_label)\n elif 'UNET' in self.model_type:\n self.train_UNET(member, train_data, train_label, valid_data,\n valid_label)\n return\n\n def train_UNET(self, member, trainX, trainY, validX, validY):\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n \"\"\"\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\nOpening {model_file}\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \"\"\"\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': \n 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation':\n 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False,\n 'pool': True, 'unpool': False, 'name': f'{self.model_type}'}\n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]\n unet_model_obj = models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error', 'mean_squared_error',\n 'mean_squared_error', 'mean_squared_error'], 'loss_weights':\n [0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus':\n plus_model_params = {'filter_num': [16, 32, 64, 128, 256],\n 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = 
models.unet_plus_2d\n elif self.model_type == 'UNET3plus':\n plus_model_params = {'filter_num_downi': [16, 32, 64, 128, \n 256], 'filter_num_skip': 'auto', 'filter_num_aggregate':\n 'auto', 'deep_supervision': True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n try:\n unet_model = unet_model_obj(**model_obj_params)\n except:\n print(f'{self.model_type} Model type not found.')\n return\n unet_model.compile(**compile_params, optimizer=tf.keras.optimizers.\n Adam(lr=0.0001))\n print(unet_model.summary())\n aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')\n n_epochs = 15\n bs = 256\n conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),\n steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)\n \"\"\"\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n \"\"\"\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n tf.keras.backend.clear_session()\n return\n\n def train_CNN(self, member, input_data):\n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX (tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX, trainY, validX, validY = input_data\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model.h5')\n print(model_file)\n if not os.path.exists(model_file):\n 
tf.keras.backend.clear_session()\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n model.add(layers.GaussianNoise(0.01, input_shape=input_shape))\n for filters in [32, 64, 128]:\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4, activation='softmax'))\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n n_epochs = 10\n bs = 256\n aug = imagedatagenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode=\n 'nearest')\n train_generator = aug.flow(trainx, trainy, batch_size=bs)\n conv_hist = model.fit(train_generator, steps_per_epoch=len(\n trainx) // bs, epochs=n_epochs, verbose=1, class_weight=\n self.class_percentages)\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n del trainY, trainX\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if os.path.exists(threshold_file):\n del validX, validY\n return\n self.validate_CNN(model, validX, validY, threshold_file)\n return\n\n def validate_CNN(self, model, validX, validY, threshold_file):\n print()\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:, 2]\n sig_hail = cnn_preds[:, 3]\n sev_prob_preds = sev_hail + sig_hail\n print('Max probability', np.nanmax(sev_prob_preds))\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[\n 'Size Threshold'])\n auc_score = []\n thresholds = np.arange(0.1, 1.01, 0.02)\n for t in thresholds:\n 
threshold_preds = np.where(sev_prob_preds >= t, 1, 0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n print(auc_score)\n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return\n\n def predict_model(self, member, patch_map_conversion_indices,\n total_map_shape, subset_map_shape, date, patch_radius,\n forecast_grid_path, lon_grid, lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). \n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n tf.keras.backend.clear_session()\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n DL_model = tf.keras.models.load_model(model_file, compile=False)\n if self.model_type == 'CNN':\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return\n prob_thresh = 0\n print(prob_thresh)\n total_count = 0\n forecast_data = self.dldataeng.read_files('forecast', member, date,\n [None], [None])\n if forecast_data is None:\n print('No forecast data found')\n return\n standard_forecast_data = np.array([self.dldataeng.standardize_data(\n member, forecast_data[hour]) for hour in np.arange(\n forecast_data.shape[0])])\n del forecast_data\n total_grid = np.empty((standard_forecast_data.shape[0], \n total_map_shape[0] * total_map_shape[1])) * np.nan\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n DL_prediction = np.array(DL_model.predict(\n standard_forecast_data[hour]))\n if self.model_type == 'CNN':\n severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds\n [:, 3] >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n if len(severe_proba_indices) < 1:\n continue\n 
severe_patches[severe_proba_indices] = np.full((\n patch_radius, patch_radius), 1)\n total_grid[hour, map_conversion_inds] = severe_patches.ravel()\n print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[\n :, 2] + cnn_preds[:, 3]))\n total_count += len(severe_proba_indices)\n print('Total severe probs:', total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n overlap_pt = 4\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1, patch,\n overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0\n ].ravel()\n else:\n hourly_patch_data = DL_prediction[patch, overlap_pt\n :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel()\n total_grid[hour, patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data = total_grid.reshape((total_grid.shape[0],) +\n total_map_shape)\n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n gridded_out_file = (date_outpath +\n f'{member}_{date[0]}_forecast_grid.h5')\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf:\n hf.create_dataset('data', data=output_data, compression='gzip',\n compression_opts=6)\n return\n\n\ndef dice_loss(y_true, y_pred):\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.math.sigmoid(y_pred)\n numerator = 2 * tf.reduce_sum(y_true * y_pred)\n denominator = tf.reduce_sum(y_true + y_pred)\n return 1 - numerator / denominator\n\n\n<mask token>\n\n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2, 2))(c)\n return c, p\n\n\ndef up_block(x, skip, filters, 
kernel_size=(3, 3)):\n up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)\n concat = layers.Concatenate()([up, skip])\n c = layers.Conv2D(filters, kernel_size, padding='same')(concat)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n",
"step-5": "from processing.DLDataEngineering import DLDataEngineering\nfrom sklearn.preprocessing import OneHotEncoder\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport os\n\nfrom scipy.ndimage import gaussian_filter\n \n#Deep learning packages\nimport tensorflow as tf\n#from tensorflow import keras\nfrom tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply\nfrom tensorflow.keras.backend import max\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n#from tensorflow import keras \nfrom sklearn.metrics import f1_score,roc_auc_score\n\nimport matplotlib.pyplot as plt\nimport cartopy.feature as cf \nimport cartopy.crs as ccrs\nimport cartopy\n\nfrom keras_unet_collection import models, base, utils\n\nclass DLModeler(object):\n def __init__(self,model_path,hf_path,num_examples,\n class_percentages,predictors,model_args,\n model_type):\n \n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args \n self.model_type = model_type\n \n long_predictors = []\n #Shorten predictor names\n \n for predictor in predictors:\n if \"_\" in predictor: \n predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]\n elif \" \" in predictor: \n predictor_name = ''.join([v[0].upper() for v in predictor.split()])\n else: predictor_name = predictor\n long_predictors.append(predictor_name)\n \n self.predictors = np.array(long_predictors)\n \n #Class to read data and standardize\n self.dldataeng = DLDataEngineering(self.model_path,self.hf_path, \n self.num_examples,self.class_percentages,self.predictors,\n self.model_args)\n \n \n return\n \n\n def train_models(self,member,train_dates,valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. 
\n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). \n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates,self.model_type)\n \n #valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)\n valid_data, valid_label = [],[]\n \n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False,categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))\n self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)\n\n elif 'UNET' in self.model_type:\n #train_label[train_label >= 50.] = 50. \n #log_train_label = np.log((train_label+1.0))\n self.train_UNET(member,train_data,train_label,valid_data,valid_label)\n \n return \n\n def train_UNET(self,member,trainX,trainY,validX,validY):\n \n model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'\n \n '''\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\\nOpening {model_file}\\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n '''\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n #print('Validation data shape {0}'.format(np.shape(validX)))\n #print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1, \n 'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU', \n 'output_activation':'ReLU', 'batch_norm':False, 'pool':True, \n 'unpool':False, 'name':f'{self.model_type}'}\n \n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]\n unet_model_obj = 
models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n \n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error','mean_squared_error',\n 'mean_squared_error','mean_squared_error'],\n 'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus': \n plus_model_params = {'filter_num':[16, 32, 64, 128, 256],\n 'deep_supervision':True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_plus_2d\n\n elif self.model_type == 'UNET3plus': \n plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],\n 'filter_num_skip':'auto', 'filter_num_aggregate':'auto',\n 'deep_supervision':True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n \n try: unet_model = unet_model_obj(**model_obj_params)\n except: \n print(f\"{self.model_type} Model type not found.\")\n return\n \n unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))\n print(unet_model.summary())\n \n #Augment data\n aug = ImageDataGenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n #Fit UNET\n n_epochs = 15\n bs = 256\n \n conv_hist = unet_model.fit(\n aug.flow(trainX,trainY,batch_size=bs),\n steps_per_epoch=len(trainX)/bs,\n epochs=n_epochs,verbose=1) \n '''\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n '''\n #Save trained model\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n \n #Clear graphs\n tf.keras.backend.clear_session()\n \n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \n \n def train_CNN(self,member,input_data): \n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX 
(tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX,trainY,validX,validY = input_data\n \n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n \n model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'\n print(model_file)\n if not os.path.exists(model_file):\n # Clear graphs\n tf.keras.backend.clear_session()\n \n #Initiliaze Convolutional Neural Net (CNN)\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n \n #First layer: input shape (y,x,# variables) \n #Add noise\n model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))\n for filters in [32,64,128]:\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n \n #Flatten the last convolutional layer \n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4,activation='softmax'))\n #Compile neural net\n model.compile(optimizer='adam',loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n #fit neural net\n n_epochs = 10\n bs = 256\n\n #augment data\n aug = imagedatagenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n \n train_generator = aug.flow(trainx,trainy,batch_size=bs)\n conv_hist = model.fit(\n train_generator,steps_per_epoch=len(trainx) // bs,\n epochs=n_epochs,verbose=1,class_weight=self.class_percentages)\n #save trained model\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model 
= tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n\n del trainY,trainX\n \n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if os.path.exists(threshold_file): \n del validX,validY\n return\n \n self.validate_CNN(model,validX,validY,threshold_file)\n return \n\n def validate_CNN(self,model,validX,validY,threshold_file): \n print()\n #Predict on validation data\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:,2]\n sig_hail = cnn_preds[:,3]\n #combine the severe hail and sig severe hail classes\n sev_prob_preds = sev_hail+sig_hail\n print('Max probability',np.nanmax(sev_prob_preds))\n #classify labels as severe hail or no hail\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n \n df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])\n #Find threshold with the highest validation AUC score \n auc_score = []\n thresholds = np.arange(0.1,1.01,0.02)\n for t in thresholds:\n threshold_preds = np.where(sev_prob_preds >= t,1,0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n \n print(auc_score)\n #output threshold with highest AUC \n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return \n \n \n def predict_model(self,member,patch_map_conversion_indices,\n total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):\n lon_grid,lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). 
\n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n \n ################## \n # Load in any saved DL model files\n ################## \n \n #Clear any saved DL graphs\n tf.keras.backend.clear_session()\n \n #Load DL model\n model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'\n DL_model = tf.keras.models.load_model(model_file,compile=False) \n \n if self.model_type == 'CNN':\n #Use minimum prob threshold chosen with validation data\n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return \n prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05\n print(prob_thresh) \n total_count = 0\n \n ################## \n #Extract forecast data (#hours, #patches, nx, ny, #variables)\n ################## \n \n forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])\n \n if forecast_data is None: \n print('No forecast data found')\n return\n \n ################## \n # Standardize hourly data\n ################## \n \n standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour]) \n for hour in np.arange(forecast_data.shape[0])])\n \n del forecast_data\n ################## \n # Produce gridded hourly hail forecast \n ################## \n\n total_grid = np.empty( (standard_forecast_data.shape[0],\n total_map_shape[0]*total_map_shape[1]) )*np.nan\n\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n #Predict probability of severe hail\n DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))\n ######\n # Will need to fix CNN code to reflect the conversion inds are in \n #patches x (patch_radius*patch_radius) instead of (patches*radius*radius)\n #####\n if self.model_type == 'CNN':\n severe_proba_indices = np.where( 
(cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n #If no hourly severe hail predicted, continue\n if len(severe_proba_indices) <1 : continue\n severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)\n total_grid[hour,map_conversion_inds] = severe_patches.ravel()\n print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))\n total_count += len(severe_proba_indices)\n print('Total severe probs:',total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n #Gets rid of overlapping edges\n overlap_pt = 4\n # If unet3+ then the last output tensor is the correct one\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,\n overlap_pt:-overlap_pt,0].ravel()\n else:\n hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,\n overlap_pt:-overlap_pt,0].ravel()\n total_grid[hour,patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)\n \n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n \n #Output gridded forecasts\n if not os.path.exists(date_outpath): os.makedirs(date_outpath)\n gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf: \n hf.create_dataset(\"data\",data=output_data,\n compression='gzip',compression_opts=6)\n \n return\n\ndef dice_loss(y_true, y_pred):\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.math.sigmoid(y_pred)\n numerator = 2 * tf.reduce_sum(y_true * y_pred)\n denominator = tf.reduce_sum(y_true + y_pred)\n return 1 - numerator / denominator\n\n'''\nFrom: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/\n''' \n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = 
layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2,2))(c)\n return c, p\n\ndef up_block(x, skip, filters, kernel_size=(3, 3)):\n up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)\n concat = layers.Concatenate()([up, skip])\n c = layers.Conv2D(filters, kernel_size, padding='same')(concat)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
def checkStringLine(ip, host, pagel, objects, title):
    """Record a site unless its IP address is already in the `sites` table.

    Args:
        ip: "IP:port" string from the request; only the IP part is matched.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    onlyIp = ip.split(':')[0]
    connection = siteLines()
    try:
        with connection.cursor() as cursor:
            # Parameterized query: never interpolate request data into SQL
            # (the original f-string was an SQL-injection vector).
            cursor.execute('SELECT `IP` FROM `sites` WHERE `IP`=%s', (onlyIp,))
            result = cursor.fetchone()
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    if result is None:
        SiteStringLine(ip, host, pagel, objects, title)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def checkStringLine(ip, host, pagel, objects, title):
    """Record a site unless its IP address is already in the `sites` table.

    Args:
        ip: "IP:port" string from the request; only the IP part is matched.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    onlyIp = ip.split(':')[0]
    connection = siteLines()
    try:
        with connection.cursor() as cursor:
            # Parameterized query: never interpolate request data into SQL
            # (the original f-string was an SQL-injection vector).
            cursor.execute('SELECT `IP` FROM `sites` WHERE `IP`=%s', (onlyIp,))
            result = cursor.fetchone()
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    if result is None:
        SiteStringLine(ip, host, pagel, objects, title)
def SiteStringLine(ip, host, pagel, objects, title):
    """Insert one row describing a site into the `sites` table.

    Args:
        ip: "IP:port" string stored in the `IP` column.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    connection = siteLines()
    with connection:
        with connection.cursor() as cursor:
            # Parameterized INSERT: request-supplied values must never be
            # formatted directly into the SQL string (injection risk).
            sql = ('INSERT INTO `sites` '
                   '(`IP`, `URL`, `PageLeight`, `Objects`, `Title`) '
                   'VALUES (%s, %s, %s, %s, %s)')
            cursor.execute(sql, (ip, host, pagel, objects, title))
        connection.commit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def checkStringLine(ip, host, pagel, objects, title):
    """Record a site unless its IP address is already in the `sites` table.

    Args:
        ip: "IP:port" string from the request; only the IP part is matched.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    onlyIp = ip.split(':')[0]
    connection = siteLines()
    try:
        with connection.cursor() as cursor:
            # Parameterized query: never interpolate request data into SQL
            # (the original f-string was an SQL-injection vector).
            cursor.execute('SELECT `IP` FROM `sites` WHERE `IP`=%s', (onlyIp,))
            result = cursor.fetchone()
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    if result is None:
        SiteStringLine(ip, host, pagel, objects, title)
def SiteStringLine(ip, host, pagel, objects, title):
    """Insert one row describing a site into the `sites` table.

    Args:
        ip: "IP:port" string stored in the `IP` column.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    connection = siteLines()
    with connection:
        with connection.cursor() as cursor:
            # Parameterized INSERT: request-supplied values must never be
            # formatted directly into the SQL string (injection risk).
            sql = ('INSERT INTO `sites` '
                   '(`IP`, `URL`, `PageLeight`, `Objects`, `Title`) '
                   'VALUES (%s, %s, %s, %s, %s)')
            cursor.execute(sql, (ip, host, pagel, objects, title))
        connection.commit()
form = cgi.FieldStorage()
# Append the raw request to the gate log; 'with' guarantees the file handle
# is flushed and closed (the original open().write() leaked it).
with open('gates.log', 'a+', encoding='utf-8') as log:
    log.write(str(form) + '\n')
if 'host' in form:
    # NOTE: the original assigned form.__contains__(...) booleans to unused
    # variables; that dead code is removed.  Record the site in a background
    # thread so the CGI response is not delayed by the database round-trip.
    thread0 = threading.Thread(
        target=checkStringLine,
        args=(form['ip'].value, form['host'].value, form['pagel'].value,
              form['words'].value, form['title'].value))
    thread0.start()
<|reserved_special_token_1|>
import cgi
import pymysql
import pymysql.cursors
import binascii
import os
from mylib import siteLines
import threading
def checkStringLine(ip, host, pagel, objects, title):
    """Record a site unless its IP address is already in the `sites` table.

    Args:
        ip: "IP:port" string from the request; only the IP part is matched.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    onlyIp = ip.split(':')[0]
    connection = siteLines()
    try:
        with connection.cursor() as cursor:
            # Parameterized query: never interpolate request data into SQL
            # (the original f-string was an SQL-injection vector).
            cursor.execute('SELECT `IP` FROM `sites` WHERE `IP`=%s', (onlyIp,))
            result = cursor.fetchone()
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    if result is None:
        SiteStringLine(ip, host, pagel, objects, title)
def SiteStringLine(ip, host, pagel, objects, title):
    """Insert one row describing a site into the `sites` table.

    Args:
        ip: "IP:port" string stored in the `IP` column.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    connection = siteLines()
    with connection:
        with connection.cursor() as cursor:
            # Parameterized INSERT: request-supplied values must never be
            # formatted directly into the SQL string (injection risk).
            sql = ('INSERT INTO `sites` '
                   '(`IP`, `URL`, `PageLeight`, `Objects`, `Title`) '
                   'VALUES (%s, %s, %s, %s, %s)')
            cursor.execute(sql, (ip, host, pagel, objects, title))
        connection.commit()
form = cgi.FieldStorage()
# Append the raw request to the gate log; 'with' guarantees the file handle
# is flushed and closed (the original open().write() leaked it).
with open('gates.log', 'a+', encoding='utf-8') as log:
    log.write(str(form) + '\n')
if 'host' in form:
    # NOTE: the original assigned form.__contains__(...) booleans to unused
    # variables; that dead code is removed.  Record the site in a background
    # thread so the CGI response is not delayed by the database round-trip.
    thread0 = threading.Thread(
        target=checkStringLine,
        args=(form['ip'].value, form['host'].value, form['pagel'].value,
              form['words'].value, form['title'].value))
    thread0.start()
<|reserved_special_token_1|>
#!/usr/local/bin/python
import cgi
import pymysql
import pymysql.cursors
import binascii
import os
from mylib import siteLines
import threading
def checkStringLine(ip, host, pagel, objects, title):
    """Record a site unless its IP address is already in the `sites` table.

    Args:
        ip: "IP:port" string from the request; only the IP part is matched.
        host: site URL.
        pagel: page length.
        objects: word/object count.
        title: page title.
    """
    onlyIp = ip.split(":")[0]
    connection = siteLines()
    try:
        with connection.cursor() as cursor:
            # Read a single record with a parameterized query: never
            # interpolate request data into SQL (the original f-string
            # was an SQL-injection vector).
            cursor.execute('SELECT `IP` FROM `sites` WHERE `IP`=%s', (onlyIp,))
            result = cursor.fetchone()
    finally:
        # The original leaked the connection; always release it.
        connection.close()
    if result is None:
        SiteStringLine(ip, host, pagel, objects, title)
def SiteStringLine(ip, host, pagel, objects, title):
connection = siteLines()
with connection:
with connection.cursor() as cursor:
# Create a new record
sql = f"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES (\'{ip}\', \'{host}\', \'{pagel}\', \'{objects}\', \'{title}\')"
cursor.execute(sql)
connection.commit()
form = cgi.FieldStorage()
open("gates.log", "a+", encoding="utf-8").write(str(form) + "\n")
if form.__contains__("host"):
ip = form.__contains__("ip")
host = form.__contains__("host")
pagel = form.__contains__("pagel")
objects = form.__contains__("words")
title = form.__contains__("title")
thread0 = threading.Thread(target = checkStringLine, args = (form["ip"].value, form["host"].value, form["pagel"].value, form["words"].value, form["title"].value))
thread0.start()
|
flexible
|
{
"blob_id": "6c5c07dadbe7ec70a210ee42e756be0d710c0993",
"index": 5272,
"step-1": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\nform = cgi.FieldStorage()\nopen('gates.log', 'a+', encoding='utf-8').write(str(form) + '\\n')\nif form.__contains__('host'):\n ip = form.__contains__('ip')\n host = form.__contains__('host')\n pagel = form.__contains__('pagel')\n objects = form.__contains__('words')\n title = form.__contains__('title')\n thread0 = threading.Thread(target=checkStringLine, args=(form['ip'].\n value, form['host'].value, form['pagel'].value, form['words'].value,\n form['title'].value))\n thread0.start()\n",
"step-4": "import cgi\nimport pymysql\nimport pymysql.cursors\nimport binascii\nimport os\nfrom mylib import siteLines\nimport threading\n\n\ndef checkStringLine(ip, host, pagel, objects, title):\n onlyIp = ip.split(':')[0]\n connection = siteLines()\n with connection.cursor() as cursor:\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`='{onlyIp}'\"\n cursor.execute(sql)\n result = cursor.fetchone()\n if result == None:\n SiteStringLine(ip, host, pagel, objects, title)\n else:\n pass\n\n\ndef SiteStringLine(ip, host, pagel, objects, title):\n connection = siteLines()\n with connection:\n with connection.cursor() as cursor:\n sql = (\n f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES ('{ip}', '{host}', '{pagel}', '{objects}', '{title}')\"\n )\n cursor.execute(sql)\n connection.commit()\n\n\nform = cgi.FieldStorage()\nopen('gates.log', 'a+', encoding='utf-8').write(str(form) + '\\n')\nif form.__contains__('host'):\n ip = form.__contains__('ip')\n host = form.__contains__('host')\n pagel = form.__contains__('pagel')\n objects = form.__contains__('words')\n title = form.__contains__('title')\n thread0 = threading.Thread(target=checkStringLine, args=(form['ip'].\n value, form['host'].value, form['pagel'].value, form['words'].value,\n form['title'].value))\n thread0.start()\n",
"step-5": "#!/usr/local/bin/python\r\nimport cgi\r\nimport pymysql\r\nimport pymysql.cursors\r\nimport binascii\r\nimport os\r\nfrom mylib import siteLines\r\nimport threading\r\n\r\ndef checkStringLine(ip, host, pagel, objects, title):\r\n onlyIp = ip.split(\":\")[0]\r\n connection = siteLines()\r\n with connection.cursor() as cursor:\r\n # Read a single record\r\n sql = f\"SELECT `IP` FROM `sites` WHERE `IP`=\\'{onlyIp}\\'\"\r\n cursor.execute(sql)\r\n result = cursor.fetchone()\r\n if result == None:\r\n SiteStringLine(ip, host, pagel, objects, title)\r\n else: pass\r\n\r\ndef SiteStringLine(ip, host, pagel, objects, title):\r\n connection = siteLines()\r\n with connection:\r\n with connection.cursor() as cursor:\r\n # Create a new record\r\n sql = f\"INSERT INTO `sites` (`IP`, `URL`, `PageLeight`, `Objects`, `Title`) VALUES (\\'{ip}\\', \\'{host}\\', \\'{pagel}\\', \\'{objects}\\', \\'{title}\\')\"\r\n cursor.execute(sql)\r\n connection.commit()\r\n\r\n\r\nform = cgi.FieldStorage()\r\nopen(\"gates.log\", \"a+\", encoding=\"utf-8\").write(str(form) + \"\\n\")\r\nif form.__contains__(\"host\"):\r\n ip = form.__contains__(\"ip\")\r\n host = form.__contains__(\"host\")\r\n pagel = form.__contains__(\"pagel\")\r\n objects = form.__contains__(\"words\")\r\n title = form.__contains__(\"title\")\r\n thread0 = threading.Thread(target = checkStringLine, args = (form[\"ip\"].value, form[\"host\"].value, form[\"pagel\"].value, form[\"words\"].value, form[\"title\"].value))\r\n thread0.start()\r\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
"""
Mount /sys/fs/cgroup Option
"""
from typing import Callable
import click
def cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:
"""
Option for choosing to mount `/sys/fs/cgroup` into the container.
"""
function = click.option(
'--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',
default=True,
show_default=True,
help=(
'Mounting ``/sys/fs/cgroup`` from the host is required to run '
'applications which require ``cgroup`` isolation. '
'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '
'the host.'
),
)(command) # type: Callable[..., None]
return function
|
normal
|
{
"blob_id": "237f5e2e37187e26b5628032e37d3a525ef72b9a",
"index": 7261,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True, show_default=True, help=\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'\n )(command)\n return function\n",
"step-3": "<mask token>\nfrom typing import Callable\nimport click\n\n\ndef cgroup_mount_option(command: Callable[..., None]) ->Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option('--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True, show_default=True, help=\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run applications which require ``cgroup`` isolation. Choose to not mount ``/sys/fs/cgroup`` if it is not available on the host.'\n )(command)\n return function\n",
"step-4": "\"\"\"\nMount /sys/fs/cgroup Option\n\"\"\"\n\nfrom typing import Callable\n\nimport click\n\n\ndef cgroup_mount_option(command: Callable[..., None]) -> Callable[..., None]:\n \"\"\"\n Option for choosing to mount `/sys/fs/cgroup` into the container.\n \"\"\"\n function = click.option(\n '--mount-sys-fs-cgroup/--no-mount-sys-fs-cgroup',\n default=True,\n show_default=True,\n help=(\n 'Mounting ``/sys/fs/cgroup`` from the host is required to run '\n 'applications which require ``cgroup`` isolation. '\n 'Choose to not mount ``/sys/fs/cgroup`` if it is not available on '\n 'the host.'\n ),\n )(command) # type: Callable[..., None]\n return function\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
pairs = ['usdt', 'btc']
warn_msg = '** WARN ** '
info_msg = '** INFO **'
|
normal
|
{
"blob_id": "26289d88ac51ee359faa81ca70b01879d2b1f840",
"index": 9460,
"step-1": "<mask token>\n",
"step-2": "pairs = ['usdt', 'btc']\nwarn_msg = '** WARN ** '\ninfo_msg = '** INFO **'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class Partition(Enum):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Partition(Enum):
<|reserved_special_token_0|>
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Partition(Enum):
"""Names of dataset partitions"""
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from enum import Enum
import torch
from torch.utils.data import Dataset
class Partition(Enum):
"""Names of dataset partitions"""
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
<|reserved_special_token_1|>
"""Datasets, Dataloaders, and utils for dataloading"""
from enum import Enum
import torch
from torch.utils.data import Dataset
class Partition(Enum):
"""Names of dataset partitions"""
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
class RandomClassData(Dataset):
"""Standard normal distributed features and uniformly sampled discrete targets"""
def __init__(self, n_samples: int, n_dim: int, n_classes: int = 2):
super(RandomClassData, self).__init__()
self.features = torch.rand((n_samples, n_dim))
self.targets = torch.randint(0, n_classes, size=(n_samples,))
def __len__(self):
return len(self.targets)
def __getitem__(self, i):
return self.features[i], self.targets[i]
|
flexible
|
{
"blob_id": "4c0c88f46c2d4607d9ac00755bf122e847ea2f6a",
"index": 6221,
"step-1": "<mask token>\n\n\nclass Partition(Enum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-2": "<mask token>\n\n\nclass Partition(Enum):\n <mask token>\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-3": "<mask token>\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-4": "<mask token>\nfrom enum import Enum\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int=2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-5": "\"\"\"Datasets, Dataloaders, and utils for dataloading\"\"\"\nfrom enum import Enum\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass Partition(Enum):\n \"\"\"Names of dataset partitions\"\"\"\n TRAIN = 'train'\n VAL = 'val'\n TEST = 'test'\n\n\nclass RandomClassData(Dataset):\n \"\"\"Standard normal distributed features and uniformly sampled discrete targets\"\"\"\n\n def __init__(self, n_samples: int, n_dim: int, n_classes: int = 2):\n super(RandomClassData, self).__init__()\n self.features = torch.rand((n_samples, n_dim))\n self.targets = torch.randint(0, n_classes, size=(n_samples,))\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, i):\n return self.features[i], self.targets[i]\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.ion()
<|reserved_special_token_0|>
print('Visualizing example dataset for outlier detection.')
<|reserved_special_token_0|>
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
print('Visualizing Gaussian fit.')
<|reserved_special_token_0|>
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print(
'(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
<|reserved_special_token_0|>
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',
edgecolors='r')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))
print(
'(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'
)
input('ex8 Finished. Press ENTER to exit')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.ion()
<|reserved_special_token_0|>
print('Visualizing example dataset for outlier detection.')
data = scio.loadmat('ex8data1.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval'].flatten()
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
print('Visualizing Gaussian fit.')
mu, sigma2 = eg.estimate_gaussian(X)
p = mvg.multivariate_gaussian(X, mu, sigma2)
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
epsilon, f1 = st.select_threshold(yval, pval)
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print(
'(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
outliers = np.where(p < epsilon)
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',
edgecolors='r')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
data = scio.loadmat('ex8data2.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval'].flatten()
mu, sigma2 = eg.estimate_gaussian(X)
p = mvg.multivariate_gaussian(X, mu, sigma2)
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
epsilon, f1 = st.select_threshold(yval, pval)
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))
print(
'(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'
)
input('ex8 Finished. Press ENTER to exit')
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import estimateGaussian as eg
import multivariateGaussian as mvg
import visualizeFit as vf
import selectThreshold as st
plt.ion()
<|reserved_special_token_0|>
print('Visualizing example dataset for outlier detection.')
data = scio.loadmat('ex8data1.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval'].flatten()
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
print('Visualizing Gaussian fit.')
mu, sigma2 = eg.estimate_gaussian(X)
p = mvg.multivariate_gaussian(X, mu, sigma2)
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
epsilon, f1 = st.select_threshold(yval, pval)
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print(
'(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
outliers = np.where(p < epsilon)
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',
edgecolors='r')
input('Program paused. Press ENTER to continue')
<|reserved_special_token_0|>
data = scio.loadmat('ex8data2.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval'].flatten()
mu, sigma2 = eg.estimate_gaussian(X)
p = mvg.multivariate_gaussian(X, mu, sigma2)
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
epsilon, f1 = st.select_threshold(yval, pval)
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))
print(
'(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'
)
input('ex8 Finished. Press ENTER to exit')
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import estimateGaussian as eg
import multivariateGaussian as mvg
import visualizeFit as vf
import selectThreshold as st
plt.ion()
# np.set_printoptions(formatter={'float': '{: 0.6f}'.format})
'''第1部分 加载示例数据集'''
#先通过一个小数据集进行异常检测 便于可视化
# 数据集包含两个特征
# 一些机器的等待时间和吞吐量 实验目的找出其中可能有异常的机器
print('Visualizing example dataset for outlier detection.')
data = scio.loadmat('ex8data1.mat')
X = data['X']#训练集样本特征矩阵
Xval = data['Xval'] #验证集样本特征矩阵
yval = data['yval'].flatten() #验证集样本标签 异常/正常
# 可视化样例训练集
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)') #x1等待时间
plt.ylabel('Throughput (mb/s') #x2吞吐量
input('Program paused. Press ENTER to continue')
'''第2部分 估计训练集的分布'''
# 假设数据集的各个特征服从高斯分布
print('Visualizing Gaussian fit.')
# 参数估计
mu, sigma2 = eg.estimate_gaussian(X)
# 计算训练集的概率分布
p = mvg.multivariate_gaussian(X, mu, sigma2)
#可视化训练集的概率分布 画出等高线图
vf.visualize_fit(X, mu, sigma2)
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s')
input('Program paused. Press ENTER to continue')
'''第3部分 基于验证集 得到一个最好的概率分布阈值'''
pval = mvg.multivariate_gaussian(Xval, mu, sigma2) #根据训练集的概率分布 得到验证集样本的概率
epsilon, f1 = st.select_threshold(yval, pval) #选择合适的概率阈值
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')
# 标出训练集中的异常值
outliers = np.where(p < epsilon)
plt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none', edgecolors='r')
input('Program paused. Press ENTER to continue')
'''第4部分 基于大数据集 进行异常检测(特征数很多)'''
data = scio.loadmat('ex8data2.mat')
X = data['X'] #训练集样本特征矩阵
Xval = data['Xval'] #验证集样本特征矩阵
yval = data['yval'].flatten() #验证集样本标签 1异常 0正常
#参数估计
mu, sigma2 = eg.estimate_gaussian(X)
# 计算训练集的概率分布
p = mvg.multivariate_gaussian(X, mu, sigma2)
# 得到验证集每个样本的概率
pval = mvg.multivariate_gaussian(Xval, mu, sigma2)
# 选择一个最好的阈值
epsilon, f1 = st.select_threshold(yval, pval)
#验证程序正确性
print('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))
print('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))
print('# Outliers found: {}'.format(np.sum(np.less(p, epsilon)))) #训练集上的异常样本数量
print('(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)')
input('ex8 Finished. Press ENTER to exit')
|
flexible
|
{
"blob_id": "de6b9961e0572338c87802314e7ae3cded5168b4",
"index": 487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\n<mask token>\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\n<mask token>\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\n<mask token>\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-3": "<mask token>\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. Press ENTER to continue')\n<mask token>\ndata = scio.loadmat('ex8data2.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as scio\nimport estimateGaussian as eg\nimport multivariateGaussian as mvg\nimport visualizeFit as vf\nimport selectThreshold as st\nplt.ion()\n<mask token>\nprint('Visualizing example dataset for outlier detection.')\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\nprint('Visualizing Gaussian fit.')\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\ninput('Program paused. Press ENTER to continue')\n<mask token>\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint(\n '(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none',\n edgecolors='r')\ninput('Program paused. 
Press ENTER to continue')\n<mask token>\ndata = scio.loadmat('ex8data2.mat')\nX = data['X']\nXval = data['Xval']\nyval = data['yval'].flatten()\nmu, sigma2 = eg.estimate_gaussian(X)\np = mvg.multivariate_gaussian(X, mu, sigma2)\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\nepsilon, f1 = st.select_threshold(yval, pval)\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon))))\nprint(\n '(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)'\n )\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as scio\n\nimport estimateGaussian as eg\nimport multivariateGaussian as mvg\nimport visualizeFit as vf\nimport selectThreshold as st\n\nplt.ion()\n# np.set_printoptions(formatter={'float': '{: 0.6f}'.format})\n\n'''第1部分 加载示例数据集'''\n\n#先通过一个小数据集进行异常检测 便于可视化\n\n# 数据集包含两个特征 \n# 一些机器的等待时间和吞吐量 实验目的找出其中可能有异常的机器\n\n\nprint('Visualizing example dataset for outlier detection.')\n\n\ndata = scio.loadmat('ex8data1.mat')\nX = data['X']#训练集样本特征矩阵\nXval = data['Xval'] #验证集样本特征矩阵\nyval = data['yval'].flatten() #验证集样本标签 异常/正常 \n\n# 可视化样例训练集\nplt.figure()\nplt.scatter(X[:, 0], X[:, 1], c='b', marker='x', s=15, linewidth=1)\nplt.axis([0, 30, 0, 30])\nplt.xlabel('Latency (ms)') #x1等待时间\nplt.ylabel('Throughput (mb/s') #x2吞吐量\n\n\ninput('Program paused. Press ENTER to continue')\n\n'''第2部分 估计训练集的分布'''\n# 假设数据集的各个特征服从高斯分布\n\nprint('Visualizing Gaussian fit.')\n\n# 参数估计 \nmu, sigma2 = eg.estimate_gaussian(X)\n\n# 计算训练集的概率分布\np = mvg.multivariate_gaussian(X, mu, sigma2)\n#可视化训练集的概率分布 画出等高线图\nvf.visualize_fit(X, mu, sigma2)\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s')\n\ninput('Program paused. Press ENTER to continue')\n\n'''第3部分 基于验证集 得到一个最好的概率分布阈值'''\npval = mvg.multivariate_gaussian(Xval, mu, sigma2) #根据训练集的概率分布 得到验证集样本的概率\n\nepsilon, f1 = st.select_threshold(yval, pval) #选择合适的概率阈值\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('(you should see a value epsilon of about 8.99e-05 and F1 of about 0.875)')\n\n# 标出训练集中的异常值\noutliers = np.where(p < epsilon)\nplt.scatter(X[outliers, 0], X[outliers, 1], marker='o', facecolors='none', edgecolors='r')\n\ninput('Program paused. 
Press ENTER to continue')\n\n\n'''第4部分 基于大数据集 进行异常检测(特征数很多)'''\ndata = scio.loadmat('ex8data2.mat')\nX = data['X'] #训练集样本特征矩阵\nXval = data['Xval'] #验证集样本特征矩阵\nyval = data['yval'].flatten() #验证集样本标签 1异常 0正常\n\n#参数估计\nmu, sigma2 = eg.estimate_gaussian(X)\n\n# 计算训练集的概率分布\np = mvg.multivariate_gaussian(X, mu, sigma2)\n\n# 得到验证集每个样本的概率\npval = mvg.multivariate_gaussian(Xval, mu, sigma2)\n\n# 选择一个最好的阈值\nepsilon, f1 = st.select_threshold(yval, pval)\n\n#验证程序正确性\nprint('Best epsilon found using cross-validation: {:0.4e}'.format(epsilon))\nprint('Best F1 on Cross Validation Set: {:0.6f}'.format(f1))\nprint('# Outliers found: {}'.format(np.sum(np.less(p, epsilon)))) #训练集上的异常样本数量\nprint('(you should see a value epsilon of about 1.38e-18, F1 of about 0.615, and 117 outliers)')\n\ninput('ex8 Finished. Press ENTER to exit')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',
'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',
'lorem')]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',
'choice'), ('street_1', 'street_address'), ('street_2',
'street_address'), ('street_3', 'street_address'), ('city', 'city'),
('state', 'choice'), ('state_other', 'varchar'), ('postal_code',
'uk_postcode'), ('display', 'bool')]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',
'varchar'), ('description', 'lorem'), ('date_received', 'date'), (
'display', 'bool')]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (
'experience_type', 'choice'), ('title', 'varchar'), ('description',
'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',
'choice'), ('country', 'varchar'), ('start_date', 'date'), (
'end_date', 'date'), ('display', 'bool')]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',
'lorem'), ('display', 'bool')]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',
'choice'), ('school', 'varchar'), ('description', 'lorem'), (
'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),
('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',
'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',
'SKIP')]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AlumniAnonymizer(Anonymizer):
model = Alumni
attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (
'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',
'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),
('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (
'employer', 'varchar'), ('specialty', 'varchar'), ('medium',
'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (
'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',
'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (
'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',
'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'
), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',
'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',
'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),
('agents_year', 'choice'), ('agents_notes', 'lorem'), (
'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (
'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (
'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (
'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (
'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',
'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (
'revision', 'integer')]
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',
'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',
'lorem')]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',
'choice'), ('street_1', 'street_address'), ('street_2',
'street_address'), ('street_3', 'street_address'), ('city', 'city'),
('state', 'choice'), ('state_other', 'varchar'), ('postal_code',
'uk_postcode'), ('display', 'bool')]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',
'varchar'), ('description', 'lorem'), ('date_received', 'date'), (
'display', 'bool')]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (
'experience_type', 'choice'), ('title', 'varchar'), ('description',
'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',
'choice'), ('country', 'varchar'), ('start_date', 'date'), (
'end_date', 'date'), ('display', 'bool')]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',
'lorem'), ('display', 'bool')]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',
'choice'), ('school', 'varchar'), ('description', 'lorem'), (
'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),
('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',
'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',
'SKIP')]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MediumAnonymizer(Anonymizer):
model = Medium
attributes = [('medium_id', 'integer'), ('description', 'varchar')]
class ProfileAnonymizer(Anonymizer):
model = Profile
attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (
'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',
'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',
'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (
'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (
'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',
'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (
'accepted_terms', 'bool'), ('email_on_follow', 'bool')]
class StaffAnonymizer(Anonymizer):
model = Staff
attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (
'extension', 'varchar')]
class InstructorAnonymizer(Anonymizer):
model = Instructor
attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (
'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]
class StudentAnonymizer(Anonymizer):
model = Student
attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (
'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (
'program_length', 'integer'), ('visiting_scholar', 'bool')]
class AlumniAnonymizer(Anonymizer):
model = Alumni
attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (
'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',
'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),
('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (
'employer', 'varchar'), ('specialty', 'varchar'), ('medium',
'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (
'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',
'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (
'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',
'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'
), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',
'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',
'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),
('agents_year', 'choice'), ('agents_notes', 'lorem'), (
'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (
'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (
'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (
'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (
'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',
'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (
'revision', 'integer')]
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',
'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',
'lorem')]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',
'choice'), ('street_1', 'street_address'), ('street_2',
'street_address'), ('street_3', 'street_address'), ('city', 'city'),
('state', 'choice'), ('state_other', 'varchar'), ('postal_code',
'uk_postcode'), ('display', 'bool')]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',
'varchar'), ('description', 'lorem'), ('date_received', 'date'), (
'display', 'bool')]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (
'experience_type', 'choice'), ('title', 'varchar'), ('description',
'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',
'choice'), ('country', 'varchar'), ('start_date', 'date'), (
'end_date', 'date'), ('display', 'bool')]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',
'lorem'), ('display', 'bool')]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',
'choice'), ('school', 'varchar'), ('description', 'lorem'), (
'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),
('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',
'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',
'SKIP')]
<|reserved_special_token_1|>
from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers
from anonymizer import Anonymizer
class MediumAnonymizer(Anonymizer):
model = Medium
attributes = [('medium_id', 'integer'), ('description', 'varchar')]
class ProfileAnonymizer(Anonymizer):
model = Profile
attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (
'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',
'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',
'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (
'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (
'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',
'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (
'accepted_terms', 'bool'), ('email_on_follow', 'bool')]
class StaffAnonymizer(Anonymizer):
model = Staff
attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (
'extension', 'varchar')]
class InstructorAnonymizer(Anonymizer):
model = Instructor
attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (
'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]
class StudentAnonymizer(Anonymizer):
model = Student
attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (
'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (
'program_length', 'integer'), ('visiting_scholar', 'bool')]
class AlumniAnonymizer(Anonymizer):
model = Alumni
attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (
'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',
'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),
('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (
'employer', 'varchar'), ('specialty', 'varchar'), ('medium',
'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (
'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',
'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (
'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',
'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'
), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',
'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',
'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),
('agents_year', 'choice'), ('agents_notes', 'lorem'), (
'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (
'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (
'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (
'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (
'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',
'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (
'revision', 'integer')]
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',
'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',
'lorem')]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',
'choice'), ('street_1', 'street_address'), ('street_2',
'street_address'), ('street_3', 'street_address'), ('city', 'city'),
('state', 'choice'), ('state_other', 'varchar'), ('postal_code',
'uk_postcode'), ('display', 'bool')]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',
'varchar'), ('description', 'lorem'), ('date_received', 'date'), (
'display', 'bool')]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (
'experience_type', 'choice'), ('title', 'varchar'), ('description',
'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',
'choice'), ('country', 'varchar'), ('start_date', 'date'), (
'end_date', 'date'), ('display', 'bool')]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',
'lorem'), ('display', 'bool')]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',
'choice'), ('school', 'varchar'), ('description', 'lorem'), (
'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),
('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',
'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',
'SKIP')]
<|reserved_special_token_1|>
from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers
from anonymizer import Anonymizer
class MediumAnonymizer(Anonymizer):
model = Medium
attributes = [
('medium_id', "integer"),
('description', "varchar"),
]
class ProfileAnonymizer(Anonymizer):
model = Profile
attributes = [
('user_id', "SKIP"),
('person_id', "SKIP"),
('datatel_avatar_url', "SKIP"),
('suffix', "choice"),
('salutation', "choice"),
('middle_name', "name"),
('title', "varchar"),
('about', "lorem"),
('email2', "email"),
('home_phone1', "phonenumber"),
('biz_phone1', "phonenumber"),
('mobile_phone1', "phonenumber"),
('fax', "phonenumber"),
('allow_contact', "bool"),
('show_name', "bool"),
('url_personal', "varchar"),
('url_org', "varchar"),
('accepted_terms', "bool"),
('email_on_follow', "bool"),
]
class StaffAnonymizer(Anonymizer):
model = Staff
attributes = [
('profile_id', "SKIP"),
('office_num', "varchar"),
('extension', "varchar"),
]
class InstructorAnonymizer(Anonymizer):
model = Instructor
attributes = [
('profile_id', "SKIP"),
('office_num', "varchar"),
('extension', "varchar"),
('bio_short', "lorem"),
('bio_long', "lorem"),
]
class StudentAnonymizer(Anonymizer):
model = Student
attributes = [
('profile_id', "SKIP"),
('grad_year', "choice"),
('funding_amount', "SKIP"),
('enrollment_date', "date"),
('program_length', "integer"),
('visiting_scholar', "bool"),
]
class AlumniAnonymizer(Anonymizer):
model = Alumni
attributes = [
('profile_id', "SKIP"),
('grad_year', "choice"),
('third_year', "bool"),
('j200_inst', "varchar"),
('funding_amount', "SKIP"),
('enrollment_date', "date"),
('program_length', "integer"),
('equipment_balance', "SKIP"),
('visiting_scholar', "bool"),
('employer', "varchar"),
('specialty', "varchar"),
('medium', "choice"),
('prev_emp1', "varchar"),
('prev_emp2', "varchar"),
('prev_emp3', "varchar"),
('notes_exclude', "bool"),
('notes', "lorem"),
('mod_date', "date"),
('pub_display', "bool"),
('freelance', "bool"),
('region', "choice"),
('prev_intern1', "varchar"),
('prev_intern2', "varchar"),
('prev_intern3', "varchar"),
('first_job', "varchar"),
('books', "lorem"),
('deceased_notes', "varchar"),
('mia', "bool"),
('mia_notes', "lorem"),
('interview', "bool"),
('interview_year', "choice"),
('interview_notes', "lorem"),
('agents_year', "choice"),
('agents_notes', "lorem"),
('event_attend_notes', "lorem"),
('famous_notes', "lorem"),
('volunteer_speak', "bool"),
('volunteer_committee', "bool"),
('volunteer_interview', "bool"),
('volunteer_mentor', "bool"),
('volunteer_agent', "bool"),
('maillist_class', "bool"),
('no_maillists', "bool"),
('no_reminder', "bool"),
('suggestions', "lorem"),
('committee_notes', "lorem"),
('inactive', "bool"),
('revision', "integer"),
]
class DonationAnonymizer(Anonymizer):
model = Donation
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('amount', "integer"),
('date', "date"),
('description', "varchar"),
('notes', "lorem"),
]
class AddressAnonymizer(Anonymizer):
model = Address
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('address_type', "choice"),
('street_1', "street_address"),
('street_2', "street_address"),
('street_3', "street_address"),
('city', "city"),
('state', "choice"),
('state_other', "varchar"),
('postal_code', "uk_postcode"),
('display', "bool"),
]
class AwardAnonymizer(Anonymizer):
model = Award
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('title', "varchar"),
('description', "lorem"),
('date_received', "date"),
('display', "bool"),
]
class ReferenceAnonymizer(Anonymizer):
model = Reference
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('body', "lorem"),
]
class ExperienceAnonymizer(Anonymizer):
model = Experience
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('experience_type', "choice"),
('title', "varchar"),
('description', "lorem"),
('company', "varchar"),
('city', "city"),
('state', "choice"),
('country', "varchar"),
('start_date', "date"),
('end_date', "date"),
('display', "bool"),
]
class SkillAnonymizer(Anonymizer):
model = Skill
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('summary', "lorem"),
('display', "bool"),
]
class EducationAnonymizer(Anonymizer):
model = Education
attributes = [
('id', "SKIP"),
('profile_id', "SKIP"),
('diploma', "choice"),
('school', "varchar"),
('description', "lorem"),
('start_date', "date"),
('end_date', "date"),
('display', "bool"),
]
class ImporterUsersAnonymizer(Anonymizer):
model = ImporterUsers
attributes = [
('id', "SKIP"),
('action', "SKIP"),
('person_id', "SKIP"),
('section_id', "SKIP"),
('first_name', "SKIP"),
('last_name', "SKIP"),
('email', "SKIP"),
('photo_url', "SKIP"),
('person_type', "SKIP"),
]
|
flexible
|
{
"blob_id": "63182a8708729606f96794cddb163f707252ba61",
"index": 3205,
"step-1": "<mask token>\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 
'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-2": "<mask token>\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 
'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-3": "<mask token>\n\n\nclass MediumAnonymizer(Anonymizer):\n model = Medium\n attributes = [('medium_id', 'integer'), ('description', 'varchar')]\n\n\nclass ProfileAnonymizer(Anonymizer):\n model = Profile\n attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (\n 'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',\n 'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',\n 'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (\n 'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (\n 'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',\n 'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (\n 'accepted_terms', 'bool'), ('email_on_follow', 'bool')]\n\n\nclass StaffAnonymizer(Anonymizer):\n model = Staff\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar')]\n\n\nclass InstructorAnonymizer(Anonymizer):\n model = Instructor\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]\n\n\nclass StudentAnonymizer(Anonymizer):\n model = Student\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (\n 'program_length', 'integer'), ('visiting_scholar', 'bool')]\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 
'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), 
(\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-4": "from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers\nfrom anonymizer import Anonymizer\n\n\nclass MediumAnonymizer(Anonymizer):\n model = Medium\n attributes = [('medium_id', 'integer'), ('description', 'varchar')]\n\n\nclass ProfileAnonymizer(Anonymizer):\n model = Profile\n attributes = [('user_id', 'SKIP'), ('person_id', 'SKIP'), (\n 'datatel_avatar_url', 'SKIP'), ('suffix', 'choice'), ('salutation',\n 'choice'), ('middle_name', 'name'), ('title', 'varchar'), ('about',\n 'lorem'), ('email2', 'email'), ('home_phone1', 'phonenumber'), (\n 'biz_phone1', 'phonenumber'), ('mobile_phone1', 'phonenumber'), (\n 'fax', 'phonenumber'), ('allow_contact', 'bool'), ('show_name',\n 'bool'), ('url_personal', 'varchar'), ('url_org', 'varchar'), (\n 'accepted_terms', 'bool'), ('email_on_follow', 'bool')]\n\n\nclass StaffAnonymizer(Anonymizer):\n model = Staff\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar')]\n\n\nclass InstructorAnonymizer(Anonymizer):\n model = Instructor\n attributes = [('profile_id', 'SKIP'), ('office_num', 'varchar'), (\n 'extension', 'varchar'), ('bio_short', 'lorem'), ('bio_long', 'lorem')]\n\n\nclass StudentAnonymizer(Anonymizer):\n model = Student\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'funding_amount', 'SKIP'), ('enrollment_date', 'date'), (\n 'program_length', 'integer'), ('visiting_scholar', 'bool')]\n\n\nclass AlumniAnonymizer(Anonymizer):\n model = Alumni\n attributes = [('profile_id', 'SKIP'), ('grad_year', 'choice'), (\n 'third_year', 'bool'), ('j200_inst', 'varchar'), ('funding_amount',\n 'SKIP'), ('enrollment_date', 'date'), ('program_length', 'integer'),\n ('equipment_balance', 'SKIP'), ('visiting_scholar', 'bool'), (\n 'employer', 'varchar'), ('specialty', 'varchar'), ('medium',\n 'choice'), ('prev_emp1', 'varchar'), ('prev_emp2', 'varchar'), (\n 
'prev_emp3', 'varchar'), ('notes_exclude', 'bool'), ('notes',\n 'lorem'), ('mod_date', 'date'), ('pub_display', 'bool'), (\n 'freelance', 'bool'), ('region', 'choice'), ('prev_intern1',\n 'varchar'), ('prev_intern2', 'varchar'), ('prev_intern3', 'varchar'\n ), ('first_job', 'varchar'), ('books', 'lorem'), ('deceased_notes',\n 'varchar'), ('mia', 'bool'), ('mia_notes', 'lorem'), ('interview',\n 'bool'), ('interview_year', 'choice'), ('interview_notes', 'lorem'),\n ('agents_year', 'choice'), ('agents_notes', 'lorem'), (\n 'event_attend_notes', 'lorem'), ('famous_notes', 'lorem'), (\n 'volunteer_speak', 'bool'), ('volunteer_committee', 'bool'), (\n 'volunteer_interview', 'bool'), ('volunteer_mentor', 'bool'), (\n 'volunteer_agent', 'bool'), ('maillist_class', 'bool'), (\n 'no_maillists', 'bool'), ('no_reminder', 'bool'), ('suggestions',\n 'lorem'), ('committee_notes', 'lorem'), ('inactive', 'bool'), (\n 'revision', 'integer')]\n\n\nclass DonationAnonymizer(Anonymizer):\n model = Donation\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('amount',\n 'integer'), ('date', 'date'), ('description', 'varchar'), ('notes',\n 'lorem')]\n\n\nclass AddressAnonymizer(Anonymizer):\n model = Address\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('address_type',\n 'choice'), ('street_1', 'street_address'), ('street_2',\n 'street_address'), ('street_3', 'street_address'), ('city', 'city'),\n ('state', 'choice'), ('state_other', 'varchar'), ('postal_code',\n 'uk_postcode'), ('display', 'bool')]\n\n\nclass AwardAnonymizer(Anonymizer):\n model = Award\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('title',\n 'varchar'), ('description', 'lorem'), ('date_received', 'date'), (\n 'display', 'bool')]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n model = Reference\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('body', 'lorem')]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n model = Experience\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), (\n 
'experience_type', 'choice'), ('title', 'varchar'), ('description',\n 'lorem'), ('company', 'varchar'), ('city', 'city'), ('state',\n 'choice'), ('country', 'varchar'), ('start_date', 'date'), (\n 'end_date', 'date'), ('display', 'bool')]\n\n\nclass SkillAnonymizer(Anonymizer):\n model = Skill\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('summary',\n 'lorem'), ('display', 'bool')]\n\n\nclass EducationAnonymizer(Anonymizer):\n model = Education\n attributes = [('id', 'SKIP'), ('profile_id', 'SKIP'), ('diploma',\n 'choice'), ('school', 'varchar'), ('description', 'lorem'), (\n 'start_date', 'date'), ('end_date', 'date'), ('display', 'bool')]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n model = ImporterUsers\n attributes = [('id', 'SKIP'), ('action', 'SKIP'), ('person_id', 'SKIP'),\n ('section_id', 'SKIP'), ('first_name', 'SKIP'), ('last_name',\n 'SKIP'), ('email', 'SKIP'), ('photo_url', 'SKIP'), ('person_type',\n 'SKIP')]\n",
"step-5": "from people.models import Medium, Profile, Staff, Instructor, Student, Alumni, Donation, Address, Award, Reference, Experience, Skill, Education, ImporterUsers\nfrom anonymizer import Anonymizer\n\nclass MediumAnonymizer(Anonymizer):\n\n model = Medium\n\n attributes = [\n ('medium_id', \"integer\"),\n ('description', \"varchar\"),\n ]\n\n\nclass ProfileAnonymizer(Anonymizer):\n\n model = Profile\n\n attributes = [\n ('user_id', \"SKIP\"),\n ('person_id', \"SKIP\"),\n ('datatel_avatar_url', \"SKIP\"),\n ('suffix', \"choice\"),\n ('salutation', \"choice\"),\n ('middle_name', \"name\"),\n ('title', \"varchar\"),\n ('about', \"lorem\"),\n ('email2', \"email\"),\n ('home_phone1', \"phonenumber\"),\n ('biz_phone1', \"phonenumber\"),\n ('mobile_phone1', \"phonenumber\"),\n ('fax', \"phonenumber\"),\n ('allow_contact', \"bool\"),\n ('show_name', \"bool\"),\n ('url_personal', \"varchar\"),\n ('url_org', \"varchar\"),\n ('accepted_terms', \"bool\"),\n ('email_on_follow', \"bool\"),\n ]\n\n\nclass StaffAnonymizer(Anonymizer):\n\n model = Staff\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('office_num', \"varchar\"),\n ('extension', \"varchar\"),\n ]\n\n\nclass InstructorAnonymizer(Anonymizer):\n\n model = Instructor\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('office_num', \"varchar\"),\n ('extension', \"varchar\"),\n ('bio_short', \"lorem\"),\n ('bio_long', \"lorem\"),\n ]\n\n\nclass StudentAnonymizer(Anonymizer):\n\n model = Student\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('grad_year', \"choice\"),\n ('funding_amount', \"SKIP\"),\n ('enrollment_date', \"date\"),\n ('program_length', \"integer\"),\n ('visiting_scholar', \"bool\"),\n ]\n\n\nclass AlumniAnonymizer(Anonymizer):\n\n model = Alumni\n\n attributes = [\n ('profile_id', \"SKIP\"),\n ('grad_year', \"choice\"),\n ('third_year', \"bool\"),\n ('j200_inst', \"varchar\"),\n ('funding_amount', \"SKIP\"),\n ('enrollment_date', \"date\"),\n ('program_length', \"integer\"),\n 
('equipment_balance', \"SKIP\"),\n ('visiting_scholar', \"bool\"),\n ('employer', \"varchar\"),\n ('specialty', \"varchar\"),\n ('medium', \"choice\"),\n ('prev_emp1', \"varchar\"),\n ('prev_emp2', \"varchar\"),\n ('prev_emp3', \"varchar\"),\n ('notes_exclude', \"bool\"),\n ('notes', \"lorem\"),\n ('mod_date', \"date\"),\n ('pub_display', \"bool\"),\n ('freelance', \"bool\"),\n ('region', \"choice\"),\n ('prev_intern1', \"varchar\"),\n ('prev_intern2', \"varchar\"),\n ('prev_intern3', \"varchar\"),\n ('first_job', \"varchar\"),\n ('books', \"lorem\"),\n ('deceased_notes', \"varchar\"),\n ('mia', \"bool\"),\n ('mia_notes', \"lorem\"),\n ('interview', \"bool\"),\n ('interview_year', \"choice\"),\n ('interview_notes', \"lorem\"),\n ('agents_year', \"choice\"),\n ('agents_notes', \"lorem\"),\n ('event_attend_notes', \"lorem\"),\n ('famous_notes', \"lorem\"),\n ('volunteer_speak', \"bool\"),\n ('volunteer_committee', \"bool\"),\n ('volunteer_interview', \"bool\"),\n ('volunteer_mentor', \"bool\"),\n ('volunteer_agent', \"bool\"),\n ('maillist_class', \"bool\"),\n ('no_maillists', \"bool\"),\n ('no_reminder', \"bool\"),\n ('suggestions', \"lorem\"),\n ('committee_notes', \"lorem\"),\n ('inactive', \"bool\"),\n ('revision', \"integer\"),\n ]\n\n\nclass DonationAnonymizer(Anonymizer):\n\n model = Donation\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('amount', \"integer\"),\n ('date', \"date\"),\n ('description', \"varchar\"),\n ('notes', \"lorem\"),\n ]\n\n\nclass AddressAnonymizer(Anonymizer):\n\n model = Address\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('address_type', \"choice\"),\n ('street_1', \"street_address\"),\n ('street_2', \"street_address\"),\n ('street_3', \"street_address\"),\n ('city', \"city\"),\n ('state', \"choice\"),\n ('state_other', \"varchar\"),\n ('postal_code', \"uk_postcode\"),\n ('display', \"bool\"),\n ]\n\n\nclass AwardAnonymizer(Anonymizer):\n\n model = Award\n\n attributes = [\n ('id', 
\"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('title', \"varchar\"),\n ('description', \"lorem\"),\n ('date_received', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass ReferenceAnonymizer(Anonymizer):\n\n model = Reference\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('body', \"lorem\"),\n ]\n\n\nclass ExperienceAnonymizer(Anonymizer):\n\n model = Experience\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('experience_type', \"choice\"),\n ('title', \"varchar\"),\n ('description', \"lorem\"),\n ('company', \"varchar\"),\n ('city', \"city\"),\n ('state', \"choice\"),\n ('country', \"varchar\"),\n ('start_date', \"date\"),\n ('end_date', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass SkillAnonymizer(Anonymizer):\n\n model = Skill\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('summary', \"lorem\"),\n ('display', \"bool\"),\n ]\n\n\nclass EducationAnonymizer(Anonymizer):\n\n model = Education\n\n attributes = [\n ('id', \"SKIP\"),\n ('profile_id', \"SKIP\"),\n ('diploma', \"choice\"),\n ('school', \"varchar\"),\n ('description', \"lorem\"),\n ('start_date', \"date\"),\n ('end_date', \"date\"),\n ('display', \"bool\"),\n ]\n\n\nclass ImporterUsersAnonymizer(Anonymizer):\n\n model = ImporterUsers\n\n attributes = [\n ('id', \"SKIP\"),\n ('action', \"SKIP\"),\n ('person_id', \"SKIP\"),\n ('section_id', \"SKIP\"),\n ('first_name', \"SKIP\"),\n ('last_name', \"SKIP\"),\n ('email', \"SKIP\"),\n ('photo_url', \"SKIP\"),\n ('person_type', \"SKIP\"),\n ]\n",
"step-ids": [
16,
18,
28,
29,
30
]
}
|
[
16,
18,
28,
29,
30
] |
<|reserved_special_token_0|>
class FoodpandastoreInfo2Pipeline:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FoodpandastoreInfo2Pipeline:
def __init__(self):
engine = db_connect()
create_tables(engine)
self.session = sessionmaker(bind=engine)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FoodpandastoreInfo2Pipeline:
def __init__(self):
engine = db_connect()
create_tables(engine)
self.session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.session()
new_store_info = FoodPandaStoreInfo2(id=item['id'], code=item[
'code'], category=item['category'], name=item['name'], url=item
['url'], rating=item.get('rating', None), address=item[
'address'], latitude=item['latitude'], longitude=item[
'longitude'], is_pickup_available=item['is_pickup_available'],
is_delivery_available=item['is_delivery_available'], is_active=
item['is_active'], date=dt.datetime.utcnow())
new_ts = TambonStore(store_id=item['id'], sub_district_id=item[
'sub_district_id'], district_id=item['district_id'],
province_id=item['province_id'], updated_datetime=datetime.utcnow()
)
existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id
=item['sub_district_id'], district_id=item['district_id'],
province_id=item['province_id']).first()
if existing_tambon:
existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(
id=item['id']).first()
existing_tambon_store = session.query(TambonStore).filter_by(
store_id=item['id'], sub_district_id=item['sub_district_id'
], district_id=item['district_id'], province_id=item[
'province_id']).first()
if existing_store_info:
session.merge(existing_store_info)
if existing_tambon_store:
session.merge(new_ts)
else:
session.add(new_ts)
else:
session.add(new_store_info)
session.add(new_ts)
menus = item.get('menus', [])
for menu in menus:
m = FoodPandaStoreMenu2(id=menu['id'], name=menu['name'],
type=menu['type'], opening_time=menu['opening_time'],
closing_time=menu['closing_time'])
new_store_info.menus.append(m)
else:
print('{}, {}, {} is not persisted in TambonGeo'.format(item[
'sub_district_id'], item['district_id'], item['province_id']))
session.commit()
session.close()
<|reserved_special_token_1|>
from sqlalchemy.orm.session import sessionmaker, query
from FoodPandaStore.FoodPandaStore.model import *
import datetime as dt
from datetime import datetime
class FoodpandastoreInfo2Pipeline:
def __init__(self):
engine = db_connect()
create_tables(engine)
self.session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.session()
new_store_info = FoodPandaStoreInfo2(id=item['id'], code=item[
'code'], category=item['category'], name=item['name'], url=item
['url'], rating=item.get('rating', None), address=item[
'address'], latitude=item['latitude'], longitude=item[
'longitude'], is_pickup_available=item['is_pickup_available'],
is_delivery_available=item['is_delivery_available'], is_active=
item['is_active'], date=dt.datetime.utcnow())
new_ts = TambonStore(store_id=item['id'], sub_district_id=item[
'sub_district_id'], district_id=item['district_id'],
province_id=item['province_id'], updated_datetime=datetime.utcnow()
)
existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id
=item['sub_district_id'], district_id=item['district_id'],
province_id=item['province_id']).first()
if existing_tambon:
existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(
id=item['id']).first()
existing_tambon_store = session.query(TambonStore).filter_by(
store_id=item['id'], sub_district_id=item['sub_district_id'
], district_id=item['district_id'], province_id=item[
'province_id']).first()
if existing_store_info:
session.merge(existing_store_info)
if existing_tambon_store:
session.merge(new_ts)
else:
session.add(new_ts)
else:
session.add(new_store_info)
session.add(new_ts)
menus = item.get('menus', [])
for menu in menus:
m = FoodPandaStoreMenu2(id=menu['id'], name=menu['name'],
type=menu['type'], opening_time=menu['opening_time'],
closing_time=menu['closing_time'])
new_store_info.menus.append(m)
else:
print('{}, {}, {} is not persisted in TambonGeo'.format(item[
'sub_district_id'], item['district_id'], item['province_id']))
session.commit()
session.close()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy.orm.session import sessionmaker, query
from FoodPandaStore.FoodPandaStore.model import *
import datetime as dt
from datetime import datetime
class FoodpandastoreInfo2Pipeline:
    """Scrapy item pipeline persisting Foodpanda store items to a database.

    Each item becomes a ``FoodPandaStoreInfo2`` row plus a ``TambonStore``
    link row, but only when the item's sub-district / district / province
    triple already exists in ``TambonGeo2``; otherwise the item is skipped
    with a console message.
    """

    def __init__(self):
        # Create the schema if needed and keep a session factory around.
        engine = db_connect()
        create_tables(engine)
        self.session = sessionmaker(bind=engine)

    def process_item(self, item, spider):
        """Persist a single scraped store item (one DB session per item)."""
        db = self.session()

        # Fresh ORM rows built from the scraped payload.
        store_row = FoodPandaStoreInfo2(
            id=item['id'],
            code=item['code'],
            category=item['category'],
            name=item['name'],
            url=item['url'],
            rating=item.get('rating', None),
            address=item['address'],
            latitude=item['latitude'],
            longitude=item['longitude'],
            is_pickup_available=item['is_pickup_available'],
            is_delivery_available=item['is_delivery_available'],
            is_active=item['is_active'],
            date=dt.datetime.utcnow(),
        )
        link_row = TambonStore(
            store_id=item['id'],
            sub_district_id=item['sub_district_id'],
            district_id=item['district_id'],
            province_id=item['province_id'],
            updated_datetime=datetime.utcnow(),
        )

        # Only persist stores whose tambon triple is known to TambonGeo2.
        tambon = db.query(TambonGeo2).filter_by(
            sub_district_id=item['sub_district_id'],
            district_id=item['district_id'],
            province_id=item['province_id'],
        ).first()

        if not tambon:
            print('{}, {}, {} is not persisted in TambonGeo'.format(
                item['sub_district_id'],
                item['district_id'],
                item['province_id']))
        else:
            stored = db.query(FoodPandaStoreInfo2).filter_by(
                id=item['id']).first()
            stored_link = db.query(TambonStore).filter_by(
                store_id=item['id'],
                sub_district_id=item['sub_district_id'],
                district_id=item['district_id'],
                province_id=item['province_id'],
            ).first()

            if stored:
                # NOTE(review): this merges the row just read back from the
                # DB (effectively a no-op); presumably ``store_row`` was
                # meant — confirm intent before changing.
                db.merge(stored)
                if stored_link:
                    db.merge(link_row)
                else:
                    db.add(link_row)
            else:
                db.add(store_row)
                db.add(link_row)

            for menu in item.get('menus', []):
                menu_row = FoodPandaStoreMenu2(
                    id=menu['id'],
                    name=menu['name'],
                    type=menu['type'],
                    opening_time=menu['opening_time'],
                    closing_time=menu['closing_time'],
                )
                # NOTE(review): menus always attach to the fresh row, even
                # when ``stored`` exists and ``store_row`` is never added.
                store_row.menus.append(menu_row)

        db.commit()
        db.close()
|
flexible
|
{
"blob_id": "f66306908f1fdd5c662804e73596b445c66dc176",
"index": 9521,
"step-1": "<mask token>\n\n\nclass FoodpandastoreInfo2Pipeline:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FoodpandastoreInfo2Pipeline:\n\n def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.session = sessionmaker(bind=engine)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FoodpandastoreInfo2Pipeline:\n\n def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.session()\n new_store_info = FoodPandaStoreInfo2(id=item['id'], code=item[\n 'code'], category=item['category'], name=item['name'], url=item\n ['url'], rating=item.get('rating', None), address=item[\n 'address'], latitude=item['latitude'], longitude=item[\n 'longitude'], is_pickup_available=item['is_pickup_available'],\n is_delivery_available=item['is_delivery_available'], is_active=\n item['is_active'], date=dt.datetime.utcnow())\n new_ts = TambonStore(store_id=item['id'], sub_district_id=item[\n 'sub_district_id'], district_id=item['district_id'],\n province_id=item['province_id'], updated_datetime=datetime.utcnow()\n )\n existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id\n =item['sub_district_id'], district_id=item['district_id'],\n province_id=item['province_id']).first()\n if existing_tambon:\n existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(\n id=item['id']).first()\n existing_tambon_store = session.query(TambonStore).filter_by(\n store_id=item['id'], sub_district_id=item['sub_district_id'\n ], district_id=item['district_id'], province_id=item[\n 'province_id']).first()\n if existing_store_info:\n session.merge(existing_store_info)\n if existing_tambon_store:\n session.merge(new_ts)\n else:\n session.add(new_ts)\n else:\n session.add(new_store_info)\n session.add(new_ts)\n menus = item.get('menus', [])\n for menu in menus:\n m = FoodPandaStoreMenu2(id=menu['id'], name=menu['name'],\n type=menu['type'], opening_time=menu['opening_time'],\n closing_time=menu['closing_time'])\n new_store_info.menus.append(m)\n else:\n print('{}, {}, {} is not persisted in TambonGeo'.format(item[\n 'sub_district_id'], item['district_id'], item['province_id']))\n session.commit()\n session.close()\n",
"step-4": "from sqlalchemy.orm.session import sessionmaker, query\nfrom FoodPandaStore.FoodPandaStore.model import *\nimport datetime as dt\nfrom datetime import datetime\n\n\nclass FoodpandastoreInfo2Pipeline:\n\n def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.session()\n new_store_info = FoodPandaStoreInfo2(id=item['id'], code=item[\n 'code'], category=item['category'], name=item['name'], url=item\n ['url'], rating=item.get('rating', None), address=item[\n 'address'], latitude=item['latitude'], longitude=item[\n 'longitude'], is_pickup_available=item['is_pickup_available'],\n is_delivery_available=item['is_delivery_available'], is_active=\n item['is_active'], date=dt.datetime.utcnow())\n new_ts = TambonStore(store_id=item['id'], sub_district_id=item[\n 'sub_district_id'], district_id=item['district_id'],\n province_id=item['province_id'], updated_datetime=datetime.utcnow()\n )\n existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id\n =item['sub_district_id'], district_id=item['district_id'],\n province_id=item['province_id']).first()\n if existing_tambon:\n existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(\n id=item['id']).first()\n existing_tambon_store = session.query(TambonStore).filter_by(\n store_id=item['id'], sub_district_id=item['sub_district_id'\n ], district_id=item['district_id'], province_id=item[\n 'province_id']).first()\n if existing_store_info:\n session.merge(existing_store_info)\n if existing_tambon_store:\n session.merge(new_ts)\n else:\n session.add(new_ts)\n else:\n session.add(new_store_info)\n session.add(new_ts)\n menus = item.get('menus', [])\n for menu in menus:\n m = FoodPandaStoreMenu2(id=menu['id'], name=menu['name'],\n type=menu['type'], opening_time=menu['opening_time'],\n closing_time=menu['closing_time'])\n new_store_info.menus.append(m)\n else:\n print('{}, {}, {} is not 
persisted in TambonGeo'.format(item[\n 'sub_district_id'], item['district_id'], item['province_id']))\n session.commit()\n session.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nfrom sqlalchemy.orm.session import sessionmaker, query\nfrom FoodPandaStore.FoodPandaStore.model import *\nimport datetime as dt\nfrom datetime import datetime\n\n\n\n\nclass FoodpandastoreInfo2Pipeline:\n\n def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.session = sessionmaker(bind=engine)\n\n\n def process_item(self, item, spider):\n\n session = self.session()\n new_store_info = FoodPandaStoreInfo2(\n id=item['id'],\n code=item['code'],\n category=item['category'],\n name=item['name'],\n url=item['url'],\n rating=item.get('rating', None),\n address=item['address'],\n latitude=item['latitude'],\n longitude=item['longitude'],\n is_pickup_available=item['is_pickup_available'],\n is_delivery_available=item['is_delivery_available'],\n is_active=item['is_active'],\n date=dt.datetime.utcnow()\n )\n\n new_ts = TambonStore(\n store_id=item['id'],\n sub_district_id=item['sub_district_id'],\n district_id=item['district_id'],\n province_id=item['province_id'],\n updated_datetime=datetime.utcnow())\n\n existing_tambon = session.query(TambonGeo2).filter_by(sub_district_id = item['sub_district_id'],\n district_id=item['district_id'],\n province_id=item['province_id']).first()\n\n if existing_tambon:\n ## Store\n existing_store_info = session.query(FoodPandaStoreInfo2).filter_by(id=item['id']).first()\n existing_tambon_store = session.query(TambonStore).filter_by(store_id=item['id'],\n sub_district_id=item['sub_district_id'],\n district_id=item['district_id'],\n province_id=item['province_id']).first()\n if existing_store_info:\n session.merge(existing_store_info)\n if existing_tambon_store:\n session.merge(new_ts)\n else:\n session.add(new_ts)\n else:\n session.add(new_store_info)\n session.add(new_ts)\n\n menus = item.get('menus', [])\n 
for menu in menus:\n m = FoodPandaStoreMenu2(\n id=menu['id'],\n name=menu['name'],\n type=menu['type'],\n opening_time=menu['opening_time'],\n closing_time=menu['closing_time']\n )\n new_store_info.menus.append(m)\n\n\n else:\n print('{}, {}, {} is not persisted in TambonGeo'.format(item['sub_district_id'],\n item['district_id'],\n item['province_id']))\n\n\n session.commit()\n session.close()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
<|reserved_special_token_1|>
import os
from multiprocessing import Pool
import glob
import click
import logging
import pandas as pd
from src.resampling.resampling import Resampler
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores', type=click.INT, default=12, help=
'The number of workers for parallelization.')
@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),
help=
'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'
)
@click.option('--order', type=click.INT, nargs=1, default=3, help=
'The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores,
resampling, order):
""" This command line interface allows to resample NIFTI files within a
given bounding box contain in BOUNDING_BOXES_FILE. The images are
resampled with spline interpolation
of degree --order (default=3) and the segmentation are resampled
by nearest neighbor interpolation.
INPUT_FOLDER is the path of the folder containing the NIFTI to
resample.
OUTPUT_FOLDER is the path of the folder where to store the
resampled NIFTI files.
BOUNDING_BOXES_FILE is the path of the .csv file containing the
bounding boxes of each patient.
"""
logger = logging.getLogger(__name__)
logger.info('Resampling')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
print('resampling is {}'.format(str(resampling)))
bb_df = pd.read_csv(bounding_boxes_file)
bb_df = bb_df.set_index('PatientID')
files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',
recursive=True)]
resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
with Pool(cores) as p:
p.map(resampler, files_list)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logging.captureWarnings(True)
main()
<|reserved_special_token_1|>
import os
from multiprocessing import Pool
import glob
import click
import logging
import pandas as pd
from src.resampling.resampling import Resampler
# Default paths used as fallbacks for the CLI arguments of main():
# input NIFTI folder, output folder for resampled files, and the
# per-patient bounding-box CSV.
path_in = 'data/hecktor_nii/'
path_out = 'data/resampled/'
path_bb = 'data/bbox.csv'
@click.command()
@click.argument('input_folder', type=click.Path(exists=True), default=path_in)
@click.argument('output_folder', type=click.Path(), default=path_out)
@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)
@click.option('--cores',
              type=click.INT,
              default=12,
              help='The number of workers for parallelization.')
@click.option('--resampling',
              type=click.FLOAT,
              nargs=3,
              default=(1, 1, 1),
              help='Expect 3 positive floats describing the output '
              'resolution of the resampling. To avoid resampling '
              'on one or more dimension a value of -1 can be fed '
              'e.g. --resampling 1.0 1.0 -1 will resample the x '
              'and y axis at 1 mm/px and left the z axis untouched.')
@click.option('--order',
              type=click.INT,
              nargs=1,
              default=3,
              help='The order of the spline interpolation used to resample')
def main(input_folder, output_folder, bounding_boxes_file, cores, resampling,
         order):
    """ This command line interface allows to resample NIFTI files within a
        given bounding box contain in BOUNDING_BOXES_FILE. The images are
        resampled with spline interpolation
        of degree --order (default=3) and the segmentation are resampled
        by nearest neighbor interpolation.

        INPUT_FOLDER is the path of the folder containing the NIFTI to
        resample.
        OUTPUT_FOLDER is the path of the folder where to store the
        resampled NIFTI files.
        BOUNDING_BOXES_FILE is the path of the .csv file containing the
        bounding boxes of each patient.
    """
    logger = logging.getLogger(__name__)
    logger.info('Resampling')
    # makedirs(exist_ok=True) is race-free and also creates missing parent
    # directories, unlike the exists()/mkdir() pair it replaces.
    os.makedirs(output_folder, exist_ok=True)
    # Use the configured logger instead of a bare print for consistency.
    logger.info('resampling is %s', resampling)
    # One bounding box per patient, keyed by PatientID for lookup.
    bb_df = pd.read_csv(bounding_boxes_file)
    bb_df = bb_df.set_index('PatientID')
    # glob already returns a list; no need to copy it in a comprehension.
    files_list = glob.glob(input_folder + '/**/*.nii.gz', recursive=True)
    resampler = Resampler(bb_df, output_folder, order, resampling=resampling)
    # Fan the files out across worker processes.
    with Pool(cores) as p:
        p.map(resampler, files_list)
if __name__ == '__main__':
    # Human-readable log line format shared by all loggers in this process.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    # Route warnings.warn() messages through the logging system.
    logging.captureWarnings(True)
    main()
|
flexible
|
{
"blob_id": "3479276d4769518aa60dcd4e1bb41a8a1a7d6517",
"index": 315,
"step-1": "<mask token>\n\n\n@click.command()\n@click.argument('input_folder', type=click.Path(exists=True), default=path_in)\n@click.argument('output_folder', type=click.Path(), default=path_out)\n@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)\n@click.option('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\n@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\n@click.option('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. 
The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@click.command()\n@click.argument('input_folder', type=click.Path(exists=True), default=path_in)\n@click.argument('output_folder', type=click.Path(), default=path_out)\n@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)\n@click.option('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\n@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\n@click.option('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. 
The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-3": "<mask token>\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\n@click.command()\n@click.argument('input_folder', type=click.Path(exists=True), default=path_in)\n@click.argument('output_folder', type=click.Path(), default=path_out)\n@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)\n@click.option('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\n@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\n@click.option('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. 
The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-4": "import os\nfrom multiprocessing import Pool\nimport glob\nimport click\nimport logging\nimport pandas as pd\nfrom src.resampling.resampling import Resampler\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\n@click.command()\n@click.argument('input_folder', type=click.Path(exists=True), default=path_in)\n@click.argument('output_folder', type=click.Path(), default=path_out)\n@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)\n@click.option('--cores', type=click.INT, default=12, help=\n 'The number of workers for parallelization.')\n@click.option('--resampling', type=click.FLOAT, nargs=3, default=(1, 1, 1),\n help=\n 'Expect 3 positive floats describing the output resolution of the resampling. To avoid resampling on one or more dimension a value of -1 can be fed e.g. --resampling 1.0 1.0 -1 will resample the x and y axis at 1 mm/px and left the z axis untouched.'\n )\n@click.option('--order', type=click.INT, nargs=1, default=3, help=\n 'The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores,\n resampling, order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. 
The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [f for f in glob.glob(input_folder + '/**/*.nii.gz',\n recursive=True)]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n main()\n",
"step-5": "import os\nfrom multiprocessing import Pool\nimport glob\n\nimport click\nimport logging\nimport pandas as pd\n\nfrom src.resampling.resampling import Resampler\n\n# Default paths\npath_in = 'data/hecktor_nii/'\npath_out = 'data/resampled/'\npath_bb = 'data/bbox.csv'\n\n\n@click.command()\n@click.argument('input_folder', type=click.Path(exists=True), default=path_in)\n@click.argument('output_folder', type=click.Path(), default=path_out)\n@click.argument('bounding_boxes_file', type=click.Path(), default=path_bb)\n@click.option('--cores',\n type=click.INT,\n default=12,\n help='The number of workers for parallelization.')\n@click.option('--resampling',\n type=click.FLOAT,\n nargs=3,\n default=(1, 1, 1),\n help='Expect 3 positive floats describing the output '\n 'resolution of the resampling. To avoid resampling '\n 'on one or more dimension a value of -1 can be fed '\n 'e.g. --resampling 1.0 1.0 -1 will resample the x '\n 'and y axis at 1 mm/px and left the z axis untouched.')\n@click.option('--order',\n type=click.INT,\n nargs=1,\n default=3,\n help='The order of the spline interpolation used to resample')\ndef main(input_folder, output_folder, bounding_boxes_file, cores, resampling,\n order):\n \"\"\" This command line interface allows to resample NIFTI files within a\n given bounding box contain in BOUNDING_BOXES_FILE. 
The images are\n resampled with spline interpolation\n of degree --order (default=3) and the segmentation are resampled\n by nearest neighbor interpolation.\n\n INPUT_FOLDER is the path of the folder containing the NIFTI to\n resample.\n OUTPUT_FOLDER is the path of the folder where to store the\n resampled NIFTI files.\n BOUNDING_BOXES_FILE is the path of the .csv file containing the\n bounding boxes of each patient.\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('Resampling')\n\n if not os.path.exists(output_folder):\n os.mkdir(output_folder)\n print('resampling is {}'.format(str(resampling)))\n bb_df = pd.read_csv(bounding_boxes_file)\n bb_df = bb_df.set_index('PatientID')\n files_list = [\n f for f in glob.glob(input_folder + '/**/*.nii.gz', recursive=True)\n ]\n resampler = Resampler(bb_df, output_folder, order, resampling=resampling)\n with Pool(cores) as p:\n p.map(resampler, files_list)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logging.captureWarnings(True)\n\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Maarten Los
# See LICENSE.rst for details.
class Defaults(object):
    """Default configuration constants for INBUS."""

    # Protocol version spoken by this implementation.
    INBUS_VERSION = 2
    # Loopback host and default port, combined into an address tuple.
    LOCALHOST = "127.0.0.1"
    PORT = 7222
    INBUS_ADDRESS = (LOCALHOST, PORT)
    # I/O buffer size in bytes (64 KiB).
    BUFFER_SIZE = 65536
|
normal
|
{
"blob_id": "bc087482e901ce1831cef56aa9c7aef0c8f2d15a",
"index": 1793,
"step-1": "<mask token>\n",
"step-2": "class Defaults(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class Defaults(object):\n INBUS_VERSION = 2\n LOCALHOST = '127.0.0.1'\n PORT = 7222\n INBUS_ADDRESS = LOCALHOST, PORT\n BUFFER_SIZE = 65536\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2017 Maarten Los\n# See LICENSE.rst for details.\n\n\nclass Defaults(object):\n INBUS_VERSION = 2\n LOCALHOST = \"127.0.0.1\"\n PORT = 7222\n INBUS_ADDRESS = (LOCALHOST, PORT)\n BUFFER_SIZE = 65536\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class HackerNews(object):
<|reserved_special_token_0|>
def _get(self, url):
"""Internal method used for GET requests
Args:
url (string): URL to send GET.
Returns:
requests' response object
Raises:
HTTPError: If HTTP request failed.
"""
response = requests.get(url)
if response.status_code == requests.codes.ok:
return response
else:
raise HTTPError
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_user(self, user_id):
"""Returns Hacker News `User` object.
Args:
user_id (string): unique user id of a Hacker News user.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
response = self._get_page_param('user', user_id).json()
if not response:
raise InvalidUserID
return User(response)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Item(object):
"""
Represents stories, comments, jobs, Ask HNs and polls
"""
def __init__(self, data):
self.item_id = data.get('id')
self.deleted = data.get('deleted')
self.item_type = data.get('type')
self.by = data.get('by')
self.submission_time = datetime.datetime.fromtimestamp(data.get(
'time', 0))
self.text = data.get('text')
self.dead = data.get('dead')
self.parent = data.get('parent')
self.kids = data.get('kids')
self.descendants = data.get('descendants')
self.url = data.get('url')
self.score = data.get('score')
self.title = data.get('title')
self.parts = data.get('parts')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title
)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
class User(object):
"""
Represents a hacker i.e. a user on Hacker News
"""
def __init__(self, data):
self.user_id = data.get('id')
self.delay = data.get('delay')
self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
self.karma = data.get('karma')
self.about = data.get('about')
self.submitted = data.get('submitted')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.User: {0}>'.format(self.user_id)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HackerNews(object):
<|reserved_special_token_0|>
def _get(self, url):
"""Internal method used for GET requests
Args:
url (string): URL to send GET.
Returns:
requests' response object
Raises:
HTTPError: If HTTP request failed.
"""
response = requests.get(url)
if response.status_code == requests.codes.ok:
return response
else:
raise HTTPError
def _get_page(self, page):
return self._get('{0}{1}.json'.format(self.base_url, page))
def _get_page_param(self, page, param):
return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))
def get_item(self, item_id):
"""Returns Hacker News `Item` object.
Args:
item_id (int or string): Unique item id of Hacker News story, comment etc.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
response = self._get_page_param('item', item_id).json()
if not response:
raise InvalidItemID
return Item(response)
def get_user(self, user_id):
"""Returns Hacker News `User` object.
Args:
user_id (string): unique user id of a Hacker News user.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
response = self._get_page_param('user', user_id).json()
if not response:
raise InvalidUserID
return User(response)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ask_stories(self, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Ask HN stories.
"""
return self._get_page('askstories').json()[:limit]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Returns:
`dict` with two keys whose values are `list` objects
"""
return self._get_page('updates').json()
<|reserved_special_token_0|>
class Item(object):
"""
Represents stories, comments, jobs, Ask HNs and polls
"""
def __init__(self, data):
self.item_id = data.get('id')
self.deleted = data.get('deleted')
self.item_type = data.get('type')
self.by = data.get('by')
self.submission_time = datetime.datetime.fromtimestamp(data.get(
'time', 0))
self.text = data.get('text')
self.dead = data.get('dead')
self.parent = data.get('parent')
self.kids = data.get('kids')
self.descendants = data.get('descendants')
self.url = data.get('url')
self.score = data.get('score')
self.title = data.get('title')
self.parts = data.get('parts')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title
)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
class User(object):
"""
Represents a hacker i.e. a user on Hacker News
"""
def __init__(self, data):
self.user_id = data.get('id')
self.delay = data.get('delay')
self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
self.karma = data.get('karma')
self.about = data.get('about')
self.submitted = data.get('submitted')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.User: {0}>'.format(self.user_id)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version. Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
def _get(self, url):
"""Internal method used for GET requests
Args:
url (string): URL to send GET.
Returns:
requests' response object
Raises:
HTTPError: If HTTP request failed.
"""
response = requests.get(url)
if response.status_code == requests.codes.ok:
return response
else:
raise HTTPError
def _get_page(self, page):
return self._get('{0}{1}.json'.format(self.base_url, page))
def _get_page_param(self, page, param):
return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))
def get_item(self, item_id):
"""Returns Hacker News `Item` object.
Args:
item_id (int or string): Unique item id of Hacker News story, comment etc.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
response = self._get_page_param('item', item_id).json()
if not response:
raise InvalidItemID
return Item(response)
def get_user(self, user_id):
"""Returns Hacker News `User` object.
Args:
user_id (string): unique user id of a Hacker News user.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
response = self._get_page_param('user', user_id).json()
if not response:
raise InvalidUserID
return User(response)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def ask_stories(self, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Ask HN stories.
"""
return self._get_page('askstories').json()[:limit]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Returns:
`dict` with two keys whose values are `list` objects
"""
return self._get_page('updates').json()
<|reserved_special_token_0|>
class Item(object):
"""
Represents stories, comments, jobs, Ask HNs and polls
"""
def __init__(self, data):
self.item_id = data.get('id')
self.deleted = data.get('deleted')
self.item_type = data.get('type')
self.by = data.get('by')
self.submission_time = datetime.datetime.fromtimestamp(data.get(
'time', 0))
self.text = data.get('text')
self.dead = data.get('dead')
self.parent = data.get('parent')
self.kids = data.get('kids')
self.descendants = data.get('descendants')
self.url = data.get('url')
self.score = data.get('score')
self.title = data.get('title')
self.parts = data.get('parts')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title
)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
class User(object):
"""
Represents a hacker i.e. a user on Hacker News
"""
def __init__(self, data):
self.user_id = data.get('id')
self.delay = data.get('delay')
self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
self.karma = data.get('karma')
self.about = data.get('about')
self.submitted = data.get('submitted')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.User: {0}>'.format(self.user_id)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HackerNews(object):
def __init__(self, version='v0'):
"""
Args:
version (string): specifies Hacker News API version. Default is `v0`.
Raises:
InvalidAPIVersion: If Hacker News version is not supported.
"""
try:
self.base_url = supported_api_versions[version]
except KeyError:
raise InvalidAPIVersion
def _get(self, url):
"""Internal method used for GET requests
Args:
url (string): URL to send GET.
Returns:
requests' response object
Raises:
HTTPError: If HTTP request failed.
"""
response = requests.get(url)
if response.status_code == requests.codes.ok:
return response
else:
raise HTTPError
def _get_page(self, page):
return self._get('{0}{1}.json'.format(self.base_url, page))
def _get_page_param(self, page, param):
return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))
def get_item(self, item_id):
"""Returns Hacker News `Item` object.
Args:
item_id (int or string): Unique item id of Hacker News story, comment etc.
Returns:
`Item` object representing Hacker News item.
Raises:
InvalidItemID: If corresponding Hacker News story does not exist.
"""
response = self._get_page_param('item', item_id).json()
if not response:
raise InvalidItemID
return Item(response)
def get_user(self, user_id):
"""Returns Hacker News `User` object.
Args:
user_id (string): unique user id of a Hacker News user.
Returns:
`User` object representing a user on Hacker News.
Raises:
InvalidUserID: If no such user exists on Hacker News.
"""
response = self._get_page_param('user', user_id).json()
if not response:
raise InvalidUserID
return User(response)
def top_stories(self, limit=None):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of top stories.
"""
return self._get_page('topstories').json()[:limit]
def new_stories(self, limit=None):
"""Returns list of item ids of current new stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of new stories.
"""
return self._get_page('newstories').json()[:limit]
def ask_stories(self, limit=None):
"""Returns list of item ids of latest Ask HN stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`list` object containing ids of Ask HN stories.
"""
return self._get_page('askstories').json()[:limit]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def updates(self):
"""Returns list of item ids and user ids that have been
changed/updated recently.
Returns:
`dict` with two keys whose values are `list` objects
"""
return self._get_page('updates').json()
def get_max_item(self):
"""Returns list of item ids of current top stories
Args:
limit (int): specifies the number of stories to be returned.
Returns:
`int` if successful.
"""
return self._get_page('maxitem').json()
class Item(object):
"""
Represents stories, comments, jobs, Ask HNs and polls
"""
def __init__(self, data):
self.item_id = data.get('id')
self.deleted = data.get('deleted')
self.item_type = data.get('type')
self.by = data.get('by')
self.submission_time = datetime.datetime.fromtimestamp(data.get(
'time', 0))
self.text = data.get('text')
self.dead = data.get('dead')
self.parent = data.get('parent')
self.kids = data.get('kids')
self.descendants = data.get('descendants')
self.url = data.get('url')
self.score = data.get('score')
self.title = data.get('title')
self.parts = data.get('parts')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title
)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
class User(object):
"""
Represents a hacker i.e. a user on Hacker News
"""
def __init__(self, data):
self.user_id = data.get('id')
self.delay = data.get('delay')
self.created = datetime.datetime.fromtimestamp(data.get('created', 0))
self.karma = data.get('karma')
self.about = data.get('about')
self.submitted = data.get('submitted')
self.raw = json.dumps(data)
def __repr__(self):
retval = '<hackernews.User: {0}>'.format(self.user_id)
if sys.version_info.major < 3:
return retval.encode('utf-8', errors='backslashreplace')
return retval
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
haxor
Unofficial Python wrapper for official Hacker News API
@author avinash sajjanshetty
@email hi@avi.im
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import json
import sys
import requests
from .settings import supported_api_versions
# Public API of this module: the two model classes, the client, and the
# exception types callers are expected to catch.
__all__ = [
    'User',
    'Item',
    'HackerNews',
    'InvalidAPIVersion',
    'InvalidItemID',
    'InvalidUserID']
class InvalidItemID(Exception):
    """Raised when no Hacker News item exists for the requested item id."""
    pass
class InvalidUserID(Exception):
    """Raised when no Hacker News user exists for the requested user id."""
    pass
class InvalidAPIVersion(Exception):
    """Raised when an unsupported Hacker News API version is requested."""
    pass
class HTTPError(Exception):
    """Raised when an HTTP GET to the API does not return status 200 OK."""
    pass
class HackerNews(object):
    """Minimal client for the official Hacker News REST API."""

    def __init__(self, version='v0'):
        """
        Args:
            version (string): specifies Hacker News API version. Default is `v0`.

        Raises:
            InvalidAPIVersion: If Hacker News version is not supported.

        """
        try:
            self.base_url = supported_api_versions[version]
        except KeyError:
            # Include the offending version so the failure is self-explanatory.
            raise InvalidAPIVersion(
                'Hacker News API version {0} is not supported'.format(version))

    def _get(self, url):
        """Internal method used for GET requests

        Args:
            url (string): URL to send GET.

        Returns:
            requests' response object

        Raises:
            HTTPError: If HTTP request failed.

        """
        response = requests.get(url)
        if response.status_code == requests.codes.ok:
            return response
        else:
            # Carry the URL and status code so callers can debug failures.
            raise HTTPError('GET {0} returned status code {1}'.format(
                url, response.status_code))

    def _get_page(self, page):
        # Endpoint without a parameter, e.g. <base_url>topstories.json
        return self._get('{0}{1}.json'.format(self.base_url, page))

    def _get_page_param(self, page, param):
        # Parameterized endpoint, e.g. <base_url>item/<id>.json
        return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))

    def get_item(self, item_id):
        """Returns Hacker News `Item` object.

        Args:
            item_id (int or string): Unique item id of Hacker News story, comment etc.

        Returns:
            `Item` object representing Hacker News item.

        Raises:
            InvalidItemID: If corresponding Hacker News story does not exist.

        """
        response = self._get_page_param('item', item_id).json()
        if not response:
            raise InvalidItemID(
                'Hacker News item {0} does not exist'.format(item_id))
        return Item(response)

    def get_user(self, user_id):
        """Returns Hacker News `User` object.

        Args:
            user_id (string): unique user id of a Hacker News user.

        Returns:
            `User` object representing a user on Hacker News.

        Raises:
            InvalidUserID: If no such user exists on Hacker News.

        """
        response = self._get_page_param('user', user_id).json()
        if not response:
            raise InvalidUserID(
                'Hacker News user {0} does not exist'.format(user_id))
        return User(response)

    def top_stories(self, limit=None):
        """Returns list of item ids of current top stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of top stories.
        """
        return self._get_page('topstories').json()[:limit]

    def new_stories(self, limit=None):
        """Returns list of item ids of current new stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of new stories.
        """
        return self._get_page('newstories').json()[:limit]

    def ask_stories(self, limit=None):
        """Returns list of item ids of latest Ask HN stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Ask HN stories.
        """
        return self._get_page('askstories').json()[:limit]

    def show_stories(self, limit=None):
        """Returns list of item ids of latest Show HN stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Show HN stories.
        """
        return self._get_page('showstories').json()[:limit]

    def job_stories(self, limit=None):
        """Returns list of item ids of latest Job stories

        Args:
            limit (int): specifies the number of stories to be returned.

        Returns:
            `list` object containing ids of Job stories.
        """
        return self._get_page('jobstories').json()[:limit]

    def updates(self):
        """Returns list of item ids and user ids that have been
        changed/updated recently.

        Returns:
            `dict` with two keys whose values are `list` objects
        """
        return self._get_page('updates').json()

    def get_max_item(self):
        """Returns the id of the most recently created item.

        Returns:
            `int` if successful.
        """
        return self._get_page('maxitem').json()
class Item(object):
    """
    Represents stories, comments, jobs, Ask HNs and polls
    """

    def __init__(self, data):
        # Every field is optional in the payload; absent keys come back
        # as None via dict.get.
        field = data.get
        self.item_id = field('id')
        self.deleted = field('deleted')
        self.item_type = field('type')
        self.by = field('by')
        # Unix epoch seconds; a missing timestamp maps to 0.
        self.submission_time = datetime.datetime.fromtimestamp(
            field('time', 0))
        self.text = field('text')
        self.dead = field('dead')
        self.parent = field('parent')
        self.kids = field('kids')
        self.descendants = field('descendants')
        self.url = field('url')
        self.score = field('score')
        self.title = field('title')
        self.parts = field('parts')
        # Preserve the untouched payload as a JSON string.
        self.raw = json.dumps(data)

    def __repr__(self):
        text = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title)
        if sys.version_info.major < 3:
            text = text.encode('utf-8', errors='backslashreplace')
        return text
class User(object):
    """
    Represents a hacker i.e. a user on Hacker News
    """

    def __init__(self, data):
        # All profile fields are optional; absent keys come back as None.
        field = data.get
        self.user_id = field('id')
        self.delay = field('delay')
        # Account creation time, Unix epoch seconds (0 when missing).
        self.created = datetime.datetime.fromtimestamp(field('created', 0))
        self.karma = field('karma')
        self.about = field('about')
        self.submitted = field('submitted')
        # Preserve the untouched payload as a JSON string.
        self.raw = json.dumps(data)

    def __repr__(self):
        text = '<hackernews.User: {0}>'.format(self.user_id)
        if sys.version_info.major < 3:
            text = text.encode('utf-8', errors='backslashreplace')
        return text
|
flexible
|
{
"blob_id": "e14c7eb11c06d6de5c2f9f8adfb8b742fcb432e1",
"index": 8073,
"step-1": "<mask token>\n\n\nclass HackerNews(object):\n <mask token>\n\n def _get(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (string): URL to send GET.\n\n Returns:\n requests' response object\n\n Raises:\n HTTPError: If HTTP request failed.\n\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError\n <mask token>\n <mask token>\n <mask token>\n\n def get_user(self, user_id):\n \"\"\"Returns Hacker News `User` object.\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n response = self._get_page_param('user', user_id).json()\n if not response:\n raise InvalidUserID\n return User(response)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Item(object):\n \"\"\"\n Represents stories, comments, jobs, Ask HNs and polls\n \"\"\"\n\n def __init__(self, data):\n self.item_id = data.get('id')\n self.deleted = data.get('deleted')\n self.item_type = data.get('type')\n self.by = data.get('by')\n self.submission_time = datetime.datetime.fromtimestamp(data.get(\n 'time', 0))\n self.text = data.get('text')\n self.dead = data.get('dead')\n self.parent = data.get('parent')\n self.kids = data.get('kids')\n self.descendants = data.get('descendants')\n self.url = data.get('url')\n self.score = data.get('score')\n self.title = data.get('title')\n self.parts = data.get('parts')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title\n )\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n\n\nclass User(object):\n \"\"\"\n Represents a hacker i.e. 
a user on Hacker News\n \"\"\"\n\n def __init__(self, data):\n self.user_id = data.get('id')\n self.delay = data.get('delay')\n self.created = datetime.datetime.fromtimestamp(data.get('created', 0))\n self.karma = data.get('karma')\n self.about = data.get('about')\n self.submitted = data.get('submitted')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.User: {0}>'.format(self.user_id)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n",
"step-2": "<mask token>\n\n\nclass HackerNews(object):\n <mask token>\n\n def _get(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (string): URL to send GET.\n\n Returns:\n requests' response object\n\n Raises:\n HTTPError: If HTTP request failed.\n\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError\n\n def _get_page(self, page):\n return self._get('{0}{1}.json'.format(self.base_url, page))\n\n def _get_page_param(self, page, param):\n return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))\n\n def get_item(self, item_id):\n \"\"\"Returns Hacker News `Item` object.\n\n Args:\n item_id (int or string): Unique item id of Hacker News story, comment etc.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n response = self._get_page_param('item', item_id).json()\n if not response:\n raise InvalidItemID\n return Item(response)\n\n def get_user(self, user_id):\n \"\"\"Returns Hacker News `User` object.\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n response = self._get_page_param('user', user_id).json()\n if not response:\n raise InvalidUserID\n return User(response)\n <mask token>\n <mask token>\n\n def ask_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Ask HN stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of Ask HN stories.\n \"\"\"\n return self._get_page('askstories').json()[:limit]\n <mask token>\n <mask token>\n\n def updates(self):\n \"\"\"Returns list of item ids and user ids that have been\n changed/updated recently.\n\n Returns:\n `dict` with two keys whose values are `list` 
objects\n \"\"\"\n return self._get_page('updates').json()\n <mask token>\n\n\nclass Item(object):\n \"\"\"\n Represents stories, comments, jobs, Ask HNs and polls\n \"\"\"\n\n def __init__(self, data):\n self.item_id = data.get('id')\n self.deleted = data.get('deleted')\n self.item_type = data.get('type')\n self.by = data.get('by')\n self.submission_time = datetime.datetime.fromtimestamp(data.get(\n 'time', 0))\n self.text = data.get('text')\n self.dead = data.get('dead')\n self.parent = data.get('parent')\n self.kids = data.get('kids')\n self.descendants = data.get('descendants')\n self.url = data.get('url')\n self.score = data.get('score')\n self.title = data.get('title')\n self.parts = data.get('parts')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title\n )\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n\n\nclass User(object):\n \"\"\"\n Represents a hacker i.e. a user on Hacker News\n \"\"\"\n\n def __init__(self, data):\n self.user_id = data.get('id')\n self.delay = data.get('delay')\n self.created = datetime.datetime.fromtimestamp(data.get('created', 0))\n self.karma = data.get('karma')\n self.about = data.get('about')\n self.submitted = data.get('submitted')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.User: {0}>'.format(self.user_id)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n",
"step-3": "<mask token>\n\n\nclass HackerNews(object):\n\n def __init__(self, version='v0'):\n \"\"\"\n Args:\n version (string): specifies Hacker News API version. Default is `v0`.\n\n Raises:\n InvalidAPIVersion: If Hacker News version is not supported.\n\n \"\"\"\n try:\n self.base_url = supported_api_versions[version]\n except KeyError:\n raise InvalidAPIVersion\n\n def _get(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (string): URL to send GET.\n\n Returns:\n requests' response object\n\n Raises:\n HTTPError: If HTTP request failed.\n\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError\n\n def _get_page(self, page):\n return self._get('{0}{1}.json'.format(self.base_url, page))\n\n def _get_page_param(self, page, param):\n return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))\n\n def get_item(self, item_id):\n \"\"\"Returns Hacker News `Item` object.\n\n Args:\n item_id (int or string): Unique item id of Hacker News story, comment etc.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n response = self._get_page_param('item', item_id).json()\n if not response:\n raise InvalidItemID\n return Item(response)\n\n def get_user(self, user_id):\n \"\"\"Returns Hacker News `User` object.\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n response = self._get_page_param('user', user_id).json()\n if not response:\n raise InvalidUserID\n return User(response)\n <mask token>\n <mask token>\n\n def ask_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Ask HN stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object 
containing ids of Ask HN stories.\n \"\"\"\n return self._get_page('askstories').json()[:limit]\n <mask token>\n <mask token>\n\n def updates(self):\n \"\"\"Returns list of item ids and user ids that have been\n changed/updated recently.\n\n Returns:\n `dict` with two keys whose values are `list` objects\n \"\"\"\n return self._get_page('updates').json()\n <mask token>\n\n\nclass Item(object):\n \"\"\"\n Represents stories, comments, jobs, Ask HNs and polls\n \"\"\"\n\n def __init__(self, data):\n self.item_id = data.get('id')\n self.deleted = data.get('deleted')\n self.item_type = data.get('type')\n self.by = data.get('by')\n self.submission_time = datetime.datetime.fromtimestamp(data.get(\n 'time', 0))\n self.text = data.get('text')\n self.dead = data.get('dead')\n self.parent = data.get('parent')\n self.kids = data.get('kids')\n self.descendants = data.get('descendants')\n self.url = data.get('url')\n self.score = data.get('score')\n self.title = data.get('title')\n self.parts = data.get('parts')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title\n )\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n\n\nclass User(object):\n \"\"\"\n Represents a hacker i.e. a user on Hacker News\n \"\"\"\n\n def __init__(self, data):\n self.user_id = data.get('id')\n self.delay = data.get('delay')\n self.created = datetime.datetime.fromtimestamp(data.get('created', 0))\n self.karma = data.get('karma')\n self.about = data.get('about')\n self.submitted = data.get('submitted')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.User: {0}>'.format(self.user_id)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n",
"step-4": "<mask token>\n\n\nclass HackerNews(object):\n\n def __init__(self, version='v0'):\n \"\"\"\n Args:\n version (string): specifies Hacker News API version. Default is `v0`.\n\n Raises:\n InvalidAPIVersion: If Hacker News version is not supported.\n\n \"\"\"\n try:\n self.base_url = supported_api_versions[version]\n except KeyError:\n raise InvalidAPIVersion\n\n def _get(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (string): URL to send GET.\n\n Returns:\n requests' response object\n\n Raises:\n HTTPError: If HTTP request failed.\n\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError\n\n def _get_page(self, page):\n return self._get('{0}{1}.json'.format(self.base_url, page))\n\n def _get_page_param(self, page, param):\n return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))\n\n def get_item(self, item_id):\n \"\"\"Returns Hacker News `Item` object.\n\n Args:\n item_id (int or string): Unique item id of Hacker News story, comment etc.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n response = self._get_page_param('item', item_id).json()\n if not response:\n raise InvalidItemID\n return Item(response)\n\n def get_user(self, user_id):\n \"\"\"Returns Hacker News `User` object.\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n response = self._get_page_param('user', user_id).json()\n if not response:\n raise InvalidUserID\n return User(response)\n\n def top_stories(self, limit=None):\n \"\"\"Returns list of item ids of current top stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of top stories.\n 
\"\"\"\n return self._get_page('topstories').json()[:limit]\n\n def new_stories(self, limit=None):\n \"\"\"Returns list of item ids of current new stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of new stories.\n \"\"\"\n return self._get_page('newstories').json()[:limit]\n\n def ask_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Ask HN stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of Ask HN stories.\n \"\"\"\n return self._get_page('askstories').json()[:limit]\n <mask token>\n <mask token>\n\n def updates(self):\n \"\"\"Returns list of item ids and user ids that have been\n changed/updated recently.\n\n Returns:\n `dict` with two keys whose values are `list` objects\n \"\"\"\n return self._get_page('updates').json()\n\n def get_max_item(self):\n \"\"\"Returns list of item ids of current top stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `int` if successful.\n \"\"\"\n return self._get_page('maxitem').json()\n\n\nclass Item(object):\n \"\"\"\n Represents stories, comments, jobs, Ask HNs and polls\n \"\"\"\n\n def __init__(self, data):\n self.item_id = data.get('id')\n self.deleted = data.get('deleted')\n self.item_type = data.get('type')\n self.by = data.get('by')\n self.submission_time = datetime.datetime.fromtimestamp(data.get(\n 'time', 0))\n self.text = data.get('text')\n self.dead = data.get('dead')\n self.parent = data.get('parent')\n self.kids = data.get('kids')\n self.descendants = data.get('descendants')\n self.url = data.get('url')\n self.score = data.get('score')\n self.title = data.get('title')\n self.parts = data.get('parts')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.Item: {0} - {1}>'.format(self.item_id, self.title\n )\n if sys.version_info.major < 3:\n return retval.encode('utf-8', 
errors='backslashreplace')\n return retval\n\n\nclass User(object):\n \"\"\"\n Represents a hacker i.e. a user on Hacker News\n \"\"\"\n\n def __init__(self, data):\n self.user_id = data.get('id')\n self.delay = data.get('delay')\n self.created = datetime.datetime.fromtimestamp(data.get('created', 0))\n self.karma = data.get('karma')\n self.about = data.get('about')\n self.submitted = data.get('submitted')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.User: {0}>'.format(self.user_id)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\nhaxor\nUnofficial Python wrapper for official Hacker News API\n\n@author avinash sajjanshetty\n@email hi@avi.im\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport datetime\nimport json\nimport sys\n\nimport requests\n\nfrom .settings import supported_api_versions\n\n__all__ = [\n 'User',\n 'Item',\n 'HackerNews',\n 'InvalidAPIVersion',\n 'InvalidItemID',\n 'InvalidUserID']\n\n\nclass InvalidItemID(Exception):\n pass\n\n\nclass InvalidUserID(Exception):\n pass\n\n\nclass InvalidAPIVersion(Exception):\n pass\n\n\nclass HTTPError(Exception):\n pass\n\n\nclass HackerNews(object):\n\n def __init__(self, version='v0'):\n \"\"\"\n Args:\n version (string): specifies Hacker News API version. Default is `v0`.\n\n Raises:\n InvalidAPIVersion: If Hacker News version is not supported.\n\n \"\"\"\n try:\n self.base_url = supported_api_versions[version]\n except KeyError:\n raise InvalidAPIVersion\n\n def _get(self, url):\n \"\"\"Internal method used for GET requests\n\n Args:\n url (string): URL to send GET.\n\n Returns:\n requests' response object\n\n Raises:\n HTTPError: If HTTP request failed.\n\n \"\"\"\n response = requests.get(url)\n if response.status_code == requests.codes.ok:\n return response\n else:\n raise HTTPError\n\n def _get_page(self, page):\n return self._get('{0}{1}.json'.format(self.base_url, page))\n\n def _get_page_param(self, page, param):\n return self._get('{0}{1}/{2}.json'.format(self.base_url, page, param))\n\n def get_item(self, item_id):\n \"\"\"Returns Hacker News `Item` object.\n\n Args:\n item_id (int or string): Unique item id of Hacker News story, comment etc.\n\n Returns:\n `Item` object representing Hacker News item.\n\n Raises:\n InvalidItemID: If corresponding Hacker News story does not exist.\n\n \"\"\"\n\n response = self._get_page_param('item', item_id).json()\n\n if not response:\n raise InvalidItemID\n\n return Item(response)\n\n def 
get_user(self, user_id):\n \"\"\"Returns Hacker News `User` object.\n\n Args:\n user_id (string): unique user id of a Hacker News user.\n\n Returns:\n `User` object representing a user on Hacker News.\n\n Raises:\n InvalidUserID: If no such user exists on Hacker News.\n\n \"\"\"\n response = self._get_page_param('user', user_id).json()\n\n if not response:\n raise InvalidUserID\n\n return User(response)\n\n def top_stories(self, limit=None):\n \"\"\"Returns list of item ids of current top stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of top stories.\n \"\"\"\n return self._get_page('topstories').json()[:limit]\n\n def new_stories(self, limit=None):\n \"\"\"Returns list of item ids of current new stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of new stories.\n \"\"\"\n return self._get_page('newstories').json()[:limit]\n\n def ask_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Ask HN stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of Ask HN stories.\n \"\"\"\n return self._get_page('askstories').json()[:limit]\n\n def show_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Show HN stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of Show HN stories.\n \"\"\"\n return self._get_page('showstories').json()[:limit]\n\n def job_stories(self, limit=None):\n \"\"\"Returns list of item ids of latest Job stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `list` object containing ids of Job stories.\n \"\"\"\n return self._get_page('jobstories').json()[:limit]\n\n def updates(self):\n \"\"\"Returns list of item ids and user ids that have been\n changed/updated recently.\n\n Returns:\n 
`dict` with two keys whose values are `list` objects\n \"\"\"\n return self._get_page('updates').json()\n\n def get_max_item(self):\n \"\"\"Returns list of item ids of current top stories\n\n Args:\n limit (int): specifies the number of stories to be returned.\n\n Returns:\n `int` if successful.\n \"\"\"\n return self._get_page('maxitem').json()\n\n\nclass Item(object):\n\n \"\"\"\n Represents stories, comments, jobs, Ask HNs and polls\n \"\"\"\n\n def __init__(self, data):\n self.item_id = data.get('id')\n self.deleted = data.get('deleted')\n self.item_type = data.get('type')\n self.by = data.get('by')\n self.submission_time = datetime.datetime.fromtimestamp(\n data.get(\n 'time',\n 0))\n self.text = data.get('text')\n self.dead = data.get('dead')\n self.parent = data.get('parent')\n self.kids = data.get('kids')\n self.descendants = data.get('descendants')\n self.url = data.get('url')\n self.score = data.get('score')\n self.title = data.get('title')\n self.parts = data.get('parts')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.Item: {0} - {1}>'.format(\n self.item_id, self.title)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n\n\nclass User(object):\n\n \"\"\"\n Represents a hacker i.e. a user on Hacker News\n \"\"\"\n\n def __init__(self, data):\n self.user_id = data.get('id')\n self.delay = data.get('delay')\n self.created = datetime.datetime.fromtimestamp(data.get('created', 0))\n self.karma = data.get('karma')\n self.about = data.get('about')\n self.submitted = data.get('submitted')\n self.raw = json.dumps(data)\n\n def __repr__(self):\n retval = '<hackernews.User: {0}>'.format(self.user_id)\n if sys.version_info.major < 3:\n return retval.encode('utf-8', errors='backslashreplace')\n return retval\n",
"step-ids": [
11,
16,
17,
20,
29
]
}
|
[
11,
16,
17,
20,
29
] |
import webapp2
class RedirectToSiteRootHandler(webapp2.RequestHandler):
    """Permanently (301) redirects the matched URL to the site root."""

    def get(self):
        # 301 tells clients and crawlers the old URL moved for good.
        self.response.set_status(301)
        self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
    """301-redirects any slash-less URI to the same URI plus '/'.

    The redirect target is also echoed as a text/plain body.
    """

    def get(self, uri):
        target = uri + '/'
        self.response.set_status(301)
        headers = self.response.headers
        headers['Location'] = target
        headers['Content-Type'] = 'text/plain'
        self.response.write(target)
# URL routing: legacy /blog paths collapse to the site root; any other
# path missing a trailing slash is 301-redirected to the slashed form.
app = webapp2.WSGIApplication([
    ('/blog', RedirectToSiteRootHandler),
    ('/blog/', RedirectToSiteRootHandler),
    ('(.*[^/])', AppendTrailingSlashHandler),
], debug=True)
|
normal
|
{
"blob_id": "064792a6aba96a679bec606a85b19d4925861f7d",
"index": 2493,
"step-1": "<mask token>\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n <mask token>\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-4": "import webapp2\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\napp = webapp2.WSGIApplication([('/blog', RedirectToSiteRootHandler), (\n '/blog/', RedirectToSiteRootHandler), ('(.*[^/])',\n AppendTrailingSlashHandler)], debug=True)\n",
"step-5": "import webapp2\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\napp = webapp2.WSGIApplication([\n ('/blog', RedirectToSiteRootHandler),\n ('/blog/', RedirectToSiteRootHandler),\n ('(.*[^/])', AppendTrailingSlashHandler),\n], debug=True)\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
<|reserved_special_token_0|>
class Memoized(object):
def __init__(self, func):
self.func = func
self.results = {}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Memoized(object):
def __init__(self, func):
self.func = func
self.results = {}
def __get__(self, instance, cls):
self.instance = instance
return self
def __call__(self, *args):
key = args
try:
return self.results[key]
except KeyError:
self.results[key] = self.func(self.instance, *args)
return self.results[key]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def do_cprofile(filename):
    """Decorator factory: profile each call of the target with cProfile.

    :param filename: path the pstats dump is written to after each call.
    :return: the profiling decorator.
    """
    def wrapper(func):
        @functools.wraps(func)
        def profiled_func(*args, **kwargs):
            # Hard-coded master switch; flip to False to bypass profiling.
            DO_PROF = True
            if DO_PROF:
                profile = cProfile.Profile()
                profile.enable()
                result = func(*args, **kwargs)
                profile.disable()
                # 'tottime' orders entries by time spent in the function itself.
                sortby = 'tottime'
                ps = pstats.Stats(profile).sort_stats(sortby)
                ps.dump_stats(filename)
            else:
                result = func(*args, **kwargs)
            return result
        return profiled_func
    return wrapper


class Memoized(object):
    """Descriptor-based memoiser for methods.

    NOTE(review): the cache key is only ``args`` -- the bound instance is
    not part of the key, so distinct instances can receive each other's
    cached results; and ``self.instance`` is unset until ``__get__`` runs,
    so decorating a plain function crashes on first call. Confirm
    single-instance, method-only use.
    """

    def __init__(self, func):
        self.func = func
        self.results = {}

    def __get__(self, instance, cls):
        self.instance = instance
        return self

    def __call__(self, *args):
        key = args
        try:
            return self.results[key]
        except KeyError:
            self.results[key] = self.func(self.instance, *args)
            return self.results[key]


@do_cprofile('./ff.prof')
def f(n):
    # Naive recursive Fibonacci. NOTE(review): each recursive call goes
    # back through the profiling wrapper and enables a fresh profiler
    # while one is already active -- confirm this is intended.
    if n < 2:
        return n
    return f(n - 2) + f(n - 1)


f(5)
f(5)
<|reserved_special_token_1|>
import cProfile
import re
import pstats
import os
import functools
def do_cprofile(filename):
    """Build a decorator that profiles its target with :mod:`cProfile`.

    Args:
        filename: path the ``pstats`` dump is written to after each call.

    Returns:
        A decorator; every invocation of the wrapped function is profiled
        and the stats (sorted by internal time) are dumped to ``filename``.
    """
    def wrapper(func):
        @functools.wraps(func)
        def profiled_func(*args, **kwargs):
            # Hard-coded master switch; set to False to skip profiling.
            DO_PROF = True
            if not DO_PROF:
                return func(*args, **kwargs)
            prof = cProfile.Profile()
            prof.enable()
            result = func(*args, **kwargs)
            prof.disable()
            # 'tottime' orders entries by time spent in the function itself.
            stats = pstats.Stats(prof).sort_stats('tottime')
            stats.dump_stats(filename)
            return result
        return profiled_func
    return wrapper
class Memoized(object):
    """Caching decorator usable on plain functions and on methods.

    As a non-data descriptor, ``__get__`` records the instance a method
    was looked up on so ``__call__`` can forward it. Results are cached
    per ``(instance, args)`` pair.

    Fixes over the previous version:
      * decorating a plain function no longer crashes -- ``self.instance``
        is initialised to ``None`` and only forwarded when set;
      * the cache key includes the bound instance, so two instances no
        longer receive each other's cached results.

    NOTE(review): instances used in keys must be hashable, and the cache
    grows without bound.
    """

    def __init__(self, func):
        self.func = func
        self.results = {}
        # Set by __get__ on attribute access through an instance; None
        # means the decorator wraps a plain (unbound) function.
        self.instance = None

    def __get__(self, instance, cls):
        self.instance = instance
        return self

    def __call__(self, *args):
        key = (self.instance, args)
        try:
            return self.results[key]
        except KeyError:
            if self.instance is None:
                value = self.func(*args)
            else:
                value = self.func(self.instance, *args)
            self.results[key] = value
            return value
def _fib(n):
    # Undecorated recursion helper: keeps recursive calls out of the
    # profiling wrapper. Re-enabling an already-active cProfile.Profile
    # can raise on recent Python versions and skews the stats otherwise.
    if n < 2:
        return n
    return _fib(n - 2) + _fib(n - 1)


@do_cprofile('./ff.prof')
def f(n):
    """Return the n-th Fibonacci number; one profile covers the whole call."""
    return _fib(n)


f(5)
f(5)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cProfile
import re
import pstats
import os
import functools
# cProfile.run('re.compile("foo|bar")')
def do_cprofile(filename):
    """Decorator factory: profile each call of the target with cProfile.

    :param filename: path the pstats dump is written to after each call.
    :return: the profiling decorator.
    """
    def wrapper(func):
        @functools.wraps(func)
        def profiled_func(*args, **kwargs):
            # Flag for do profiling or not; the env-driven variant below
            # is kept for reference.
            # DO_PROF = os.getenv('PROFILING')
            DO_PROF = True
            if DO_PROF:
                profile = cProfile.Profile()
                profile.enable()
                result = func(*args, **kwargs)
                profile.disable()
                # Sort stats by internal (self) time before dumping.
                sortby = 'tottime'
                ps = pstats.Stats(profile).sort_stats(sortby)
                ps.dump_stats(filename)
            else:
                result = func(*args, **kwargs)
            return result
        return profiled_func
    return wrapper


# print(f(5))


# A sample of caching the return result.
class Memoized(object):
    """Descriptor-based memoiser for methods.

    NOTE(review): the cache key is only ``args`` -- the bound instance is
    not part of the key, so distinct instances can receive each other's
    cached results; and ``self.instance`` is unset until ``__get__`` runs,
    so decorating a plain function crashes on first call.
    """

    def __init__(self, func):
        self.func = func
        self.results = {}

    def __get__(self, instance, cls):
        self.instance = instance
        return self

    def __call__(self, *args):
        key = args
        try:
            return self.results[key]
        except KeyError:
            self.results[key] = self.func(self.instance, *args)
            return self.results[key]


@do_cprofile('./ff.prof')
# @Memoized
def f(n):
    # Naive recursive Fibonacci; recursive calls re-enter the profiling
    # wrapper, enabling a fresh profiler per call (see NOTE above).
    if n < 2:
        return n
    return f(n - 2) + f(n - 1)


f(5)
f(5)
|
flexible
|
{
"blob_id": "8c055816def1c0a19e672ab4386f9b9a345b6323",
"index": 7837,
"step-1": "<mask token>\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n return profiled_func\n return wrapper\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-4": "import cProfile\nimport re\nimport pstats\nimport os\nimport functools\n\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n return profiled_func\n return wrapper\n\n\nclass Memoized(object):\n\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport cProfile\nimport re\nimport pstats\nimport os\nimport functools\n\n\n# cProfile.run('re.compile(\"foo|bar\")')\n\ndef do_cprofile(filename):\n \"\"\"\n decorator for function profiling\n :param filename: \n :return: \n \"\"\"\n\n def wrapper(func):\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n # Flag for do profiling or not.\n # DO_PROF = os.getenv('PROFILING')\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n # Sort stat by internal time.\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n\n return profiled_func\n\n return wrapper\n\n\n# print(f(5))\n\n\n# A sample of catch the return result\nclass Memoized(object):\n def __init__(self, func):\n self.func = func\n self.results = {}\n\n def __get__(self, instance, cls):\n self.instance = instance\n return self\n\n def __call__(self, *args):\n key = args\n try:\n return self.results[key]\n except KeyError:\n self.results[key] = self.func(self.instance, *args)\n return self.results[key]\n\n\n@do_cprofile('./ff.prof')\n# @Memoized\ndef f(n):\n if n < 2:\n return n\n return f(n - 2) + f(n - 1)\n\n\nf(5)\nf(5)\n",
"step-ids": [
2,
4,
7,
8,
9
]
}
|
[
2,
4,
7,
8,
9
] |
<|reserved_special_token_0|>
class Character:
def __init__(self, screen, side_length, border_width, valid_points,
start_point, end_point, current_position, a_colour, na_colour, keys
=None, k_colour=None):
self.screen = screen
self.side_length = side_length
self.border_width = border_width
self.start_point = start_point
self.end_point = end_point
self.current_position = current_position
self.a_colour = a_colour
self.na_colour = na_colour
self.draw_position()
<|reserved_special_token_0|>
def move_character(self, next_position):
current_rect = [self.border_width + (self.side_length + self.
border_width) * self.current_position[0], self.border_width + (
self.side_length + self.border_width) * self.current_position[1
], self.side_length, self.side_length]
next_rect = [self.border_width + (self.side_length + self.
border_width) * next_position[0], self.border_width + (self.
side_length + self.border_width) * next_position[1], self.
side_length, self.side_length]
pygame.draw.rect(self.screen, self.na_colour, current_rect)
pygame.display.update(current_rect)
pygame.draw.rect(self.screen, self.a_colour, next_rect)
pygame.display.update(next_rect)
self.current_position = next_position
def move_character_smooth(self, next_position, steps):
if next_position[0] != self.current_position[0]:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[0] - self.current_position[0]
) * i / steps
next_pos = self.current_position[0
] + difference, self.current_position[1]
self.move_character(next_pos)
else:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[1] - self.current_position[1]
) * i / steps
next_pos = self.current_position[0], self.current_position[1
] + difference
self.move_character(next_pos)
def get_current_position(self):
return self.current_position
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Character:
def __init__(self, screen, side_length, border_width, valid_points,
start_point, end_point, current_position, a_colour, na_colour, keys
=None, k_colour=None):
self.screen = screen
self.side_length = side_length
self.border_width = border_width
self.start_point = start_point
self.end_point = end_point
self.current_position = current_position
self.a_colour = a_colour
self.na_colour = na_colour
self.draw_position()
def draw_position(self):
pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (
self.side_length + self.border_width) * self.current_position[0
], self.border_width + (self.side_length + self.border_width) *
self.current_position[1], self.side_length, self.side_length])
def move_character(self, next_position):
current_rect = [self.border_width + (self.side_length + self.
border_width) * self.current_position[0], self.border_width + (
self.side_length + self.border_width) * self.current_position[1
], self.side_length, self.side_length]
next_rect = [self.border_width + (self.side_length + self.
border_width) * next_position[0], self.border_width + (self.
side_length + self.border_width) * next_position[1], self.
side_length, self.side_length]
pygame.draw.rect(self.screen, self.na_colour, current_rect)
pygame.display.update(current_rect)
pygame.draw.rect(self.screen, self.a_colour, next_rect)
pygame.display.update(next_rect)
self.current_position = next_position
def move_character_smooth(self, next_position, steps):
if next_position[0] != self.current_position[0]:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[0] - self.current_position[0]
) * i / steps
next_pos = self.current_position[0
] + difference, self.current_position[1]
self.move_character(next_pos)
else:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[1] - self.current_position[1]
) * i / steps
next_pos = self.current_position[0], self.current_position[1
] + difference
self.move_character(next_pos)
def get_current_position(self):
return self.current_position
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Character:
def __init__(self, screen, side_length, border_width, valid_points,
start_point, end_point, current_position, a_colour, na_colour, keys
=None, k_colour=None):
self.screen = screen
self.side_length = side_length
self.border_width = border_width
self.start_point = start_point
self.end_point = end_point
self.current_position = current_position
self.a_colour = a_colour
self.na_colour = na_colour
self.draw_position()
def draw_position(self):
pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (
self.side_length + self.border_width) * self.current_position[0
], self.border_width + (self.side_length + self.border_width) *
self.current_position[1], self.side_length, self.side_length])
def move_character(self, next_position):
current_rect = [self.border_width + (self.side_length + self.
border_width) * self.current_position[0], self.border_width + (
self.side_length + self.border_width) * self.current_position[1
], self.side_length, self.side_length]
next_rect = [self.border_width + (self.side_length + self.
border_width) * next_position[0], self.border_width + (self.
side_length + self.border_width) * next_position[1], self.
side_length, self.side_length]
pygame.draw.rect(self.screen, self.na_colour, current_rect)
pygame.display.update(current_rect)
pygame.draw.rect(self.screen, self.a_colour, next_rect)
pygame.display.update(next_rect)
self.current_position = next_position
def move_character_smooth(self, next_position, steps):
if next_position[0] != self.current_position[0]:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[0] - self.current_position[0]
) * i / steps
next_pos = self.current_position[0
] + difference, self.current_position[1]
self.move_character(next_pos)
else:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[1] - self.current_position[1]
) * i / steps
next_pos = self.current_position[0], self.current_position[1
] + difference
self.move_character(next_pos)
def get_current_position(self):
return self.current_position
def reached_goal(self):
if self.current_position == self.end_point:
return True
else:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pygame
from time import sleep
class Character:
def __init__(self, screen, side_length, border_width, valid_points,
start_point, end_point, current_position, a_colour, na_colour, keys
=None, k_colour=None):
self.screen = screen
self.side_length = side_length
self.border_width = border_width
self.start_point = start_point
self.end_point = end_point
self.current_position = current_position
self.a_colour = a_colour
self.na_colour = na_colour
self.draw_position()
def draw_position(self):
pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (
self.side_length + self.border_width) * self.current_position[0
], self.border_width + (self.side_length + self.border_width) *
self.current_position[1], self.side_length, self.side_length])
def move_character(self, next_position):
current_rect = [self.border_width + (self.side_length + self.
border_width) * self.current_position[0], self.border_width + (
self.side_length + self.border_width) * self.current_position[1
], self.side_length, self.side_length]
next_rect = [self.border_width + (self.side_length + self.
border_width) * next_position[0], self.border_width + (self.
side_length + self.border_width) * next_position[1], self.
side_length, self.side_length]
pygame.draw.rect(self.screen, self.na_colour, current_rect)
pygame.display.update(current_rect)
pygame.draw.rect(self.screen, self.a_colour, next_rect)
pygame.display.update(next_rect)
self.current_position = next_position
def move_character_smooth(self, next_position, steps):
if next_position[0] != self.current_position[0]:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[0] - self.current_position[0]
) * i / steps
next_pos = self.current_position[0
] + difference, self.current_position[1]
self.move_character(next_pos)
else:
for i in range(1, steps + 1):
sleep(0.005)
difference = (next_position[1] - self.current_position[1]
) * i / steps
next_pos = self.current_position[0], self.current_position[1
] + difference
self.move_character(next_pos)
def get_current_position(self):
return self.current_position
def reached_goal(self):
if self.current_position == self.end_point:
return True
else:
return False
<|reserved_special_token_1|>
'''
Character class
'''
import pygame
from time import sleep
class Character:
def __init__(self, screen, side_length, border_width, valid_points, start_point, end_point, current_position, a_colour, na_colour,\
keys=None, k_colour=None):
self.screen = screen # pygame screen
self.side_length = side_length # length of the grid unit
self.border_width = border_width # border width of the grid unit
self.start_point = start_point # starting point of character in maze stored as a tuple
self.end_point = end_point # end point of character in maze (tuple)
self.current_position = current_position # current position of character (tuple)
self.a_colour = a_colour # active colour of the character (tuple of 3 elements) RGB colour
self.na_colour = na_colour # inactive colour of the character (tuple of 3 elements) RGB colour
# draw the initial position of the character
self.draw_position()
# draw the character
def draw_position(self):
pygame.draw.rect(self.screen, self.a_colour, [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
self.border_width+(self.side_length+self.border_width)*self.current_position[1], self.side_length, self.side_length])
# move the character to next position
def move_character(self, next_position):
# create a rectangle for the current position
current_rect = [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\
self.border_width+(self.side_length+self.border_width)*self.current_position[1],\
self.side_length, self.side_length]
# create a rectangle for the next position
next_rect = [self.border_width+(self.side_length+self.border_width)*next_position[0],\
self.border_width+(self.side_length+self.border_width)*next_position[1],\
self.side_length, self.side_length]
# draw the previous position of the character as an inactive block
pygame.draw.rect(self.screen, self.na_colour, current_rect)
# update the screen at the current point
pygame.display.update(current_rect)
# draw the next position of the character
pygame.draw.rect(self.screen, self.a_colour, next_rect)
# update the screen at the next point
pygame.display.update(next_rect)
# update the current position of the character to the next position
self.current_position = next_position
# draw the intermediate steps when moving a character
def move_character_smooth(self, next_position, steps):
# go right
if next_position[0] != self.current_position[0]:
# from i = 1 to steps
for i in range(1,steps+1):
# short delay between each intermediate step
sleep(0.005)
difference = (next_position[0]-self.current_position[0])*i/steps
next_pos = (self.current_position[0]+difference, self.current_position[1])
self.move_character(next_pos)
else:
for i in range(1,steps+1):
sleep(0.005)
difference = (next_position[1]-self.current_position[1])*i/steps
next_pos = (self.current_position[0], self.current_position[1]+difference)
self.move_character(next_pos)
# return the current position of the character
def get_current_position(self):
return self.current_position
# end goal flag
def reached_goal(self):
if self.current_position == self.end_point:
return True
else:
return False
|
flexible
|
{
"blob_id": "f7f96b19bdc20f732566709a7801002fe49d49eb",
"index": 3214,
"step-1": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n <mask token>\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n 
next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - self.current_position[1]\n ) * i / steps\n 
next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n\n def reached_goal(self):\n if self.current_position == self.end_point:\n return True\n else:\n return False\n",
"step-4": "<mask token>\nimport pygame\nfrom time import sleep\n\n\nclass Character:\n\n def __init__(self, screen, side_length, border_width, valid_points,\n start_point, end_point, current_position, a_colour, na_colour, keys\n =None, k_colour=None):\n self.screen = screen\n self.side_length = side_length\n self.border_width = border_width\n self.start_point = start_point\n self.end_point = end_point\n self.current_position = current_position\n self.a_colour = a_colour\n self.na_colour = na_colour\n self.draw_position()\n\n def draw_position(self):\n pygame.draw.rect(self.screen, self.a_colour, [self.border_width + (\n self.side_length + self.border_width) * self.current_position[0\n ], self.border_width + (self.side_length + self.border_width) *\n self.current_position[1], self.side_length, self.side_length])\n\n def move_character(self, next_position):\n current_rect = [self.border_width + (self.side_length + self.\n border_width) * self.current_position[0], self.border_width + (\n self.side_length + self.border_width) * self.current_position[1\n ], self.side_length, self.side_length]\n next_rect = [self.border_width + (self.side_length + self.\n border_width) * next_position[0], self.border_width + (self.\n side_length + self.border_width) * next_position[1], self.\n side_length, self.side_length]\n pygame.draw.rect(self.screen, self.na_colour, current_rect)\n pygame.display.update(current_rect)\n pygame.draw.rect(self.screen, self.a_colour, next_rect)\n pygame.display.update(next_rect)\n self.current_position = next_position\n\n def move_character_smooth(self, next_position, steps):\n if next_position[0] != self.current_position[0]:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[0] - self.current_position[0]\n ) * i / steps\n next_pos = self.current_position[0\n ] + difference, self.current_position[1]\n self.move_character(next_pos)\n else:\n for i in range(1, steps + 1):\n sleep(0.005)\n difference = (next_position[1] - 
self.current_position[1]\n ) * i / steps\n next_pos = self.current_position[0], self.current_position[1\n ] + difference\n self.move_character(next_pos)\n\n def get_current_position(self):\n return self.current_position\n\n def reached_goal(self):\n if self.current_position == self.end_point:\n return True\n else:\n return False\n",
"step-5": "'''\nCharacter class\n'''\n\nimport pygame\nfrom time import sleep\n\nclass Character:\n\n\tdef __init__(self, screen, side_length, border_width, valid_points, start_point, end_point, current_position, a_colour, na_colour,\\\n\t\t\t\tkeys=None, k_colour=None):\n\n\t\tself.screen = screen # pygame screen\n\t\tself.side_length = side_length # length of the grid unit\n\t\tself.border_width = border_width # border width of the grid unit\n\t\tself.start_point = start_point # starting point of character in maze stored as a tuple\n\t\tself.end_point = end_point # end point of character in maze (tuple)\n\t\tself.current_position = current_position # current position of character (tuple)\n\t\tself.a_colour = a_colour # active colour of the character (tuple of 3 elements) RGB colour\n\t\tself.na_colour = na_colour # inactive colour of the character (tuple of 3 elements) RGB colour\n\t\t\n\t\t\t\n\t\t# draw the initial position of the character\n\t\tself.draw_position()\n\n\t# draw the character\n\tdef draw_position(self):\n\t\tpygame.draw.rect(self.screen, self.a_colour, [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\\\n\t\t\tself.border_width+(self.side_length+self.border_width)*self.current_position[1], self.side_length, self.side_length])\n\n\t# move the character to next position\n\tdef move_character(self, next_position):\n\t\t# create a rectangle for the current position\n\t\tcurrent_rect = [self.border_width+(self.side_length+self.border_width)*self.current_position[0],\\\n\t\t\t\t\t\tself.border_width+(self.side_length+self.border_width)*self.current_position[1],\\\n\t\t\t\t\t\tself.side_length, self.side_length]\n\t\t# create a rectangle for the next position\n\t\tnext_rect = [self.border_width+(self.side_length+self.border_width)*next_position[0],\\\n\t\t\t\t\t self.border_width+(self.side_length+self.border_width)*next_position[1],\\\n\t\t\t\t\t self.side_length, self.side_length]\n\t\t# draw the previous position of 
the character as an inactive block\n\t\tpygame.draw.rect(self.screen, self.na_colour, current_rect)\n\t\t# update the screen at the current point\n\t\tpygame.display.update(current_rect)\n\t\t# draw the next position of the character\n\t\tpygame.draw.rect(self.screen, self.a_colour, next_rect)\n\t\t# update the screen at the next point\n\t\tpygame.display.update(next_rect)\n\t\t# update the current position of the character to the next position\n\t\tself.current_position = next_position\n\n\n\t# draw the intermediate steps when moving a character\n\tdef move_character_smooth(self, next_position, steps):\n\t\t# go right\n\t\tif next_position[0] != self.current_position[0]:\n\t\t\t# from i = 1 to steps\n\t\t\tfor i in range(1,steps+1):\n\t\t\t\t# short delay between each intermediate step\n\t\t\t\tsleep(0.005)\n\t\t\t\tdifference = (next_position[0]-self.current_position[0])*i/steps\n\t\t\t\tnext_pos = (self.current_position[0]+difference, self.current_position[1])\n\t\t\t\tself.move_character(next_pos)\n\t\telse:\n\t\t\tfor i in range(1,steps+1):\n\t\t\t\tsleep(0.005)\n\t\t\t\tdifference = (next_position[1]-self.current_position[1])*i/steps\n\t\t\t\tnext_pos = (self.current_position[0], self.current_position[1]+difference)\n\t\t\t\tself.move_character(next_pos)\n\n\t# return the current position of the character\n\tdef get_current_position(self):\n\t\treturn self.current_position\n\n\t# end goal flag\n\tdef reached_goal(self):\n\t\tif self.current_position == self.end_point:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__('Dice "faces_items" argsument need to be iterable.')
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_items" count need to be greater or equal to {min_count}.'
)
class DiceBoxWrongItemAdditionError(Exception):
def __init__(self):
super().__init__('Dice instance expected.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DiceWrongFacesCountError(Exception):
<|reserved_special_token_0|>
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__('Dice "faces_items" argsument need to be iterable.')
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_items" count need to be greater or equal to {min_count}.'
)
class DiceBoxWrongItemAdditionError(Exception):
def __init__(self):
super().__init__('Dice instance expected.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DiceWrongFacesCountTypeError(Exception):
<|reserved_special_token_0|>
class DiceWrongFacesCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_count" argsument need to be greater or equal to {min_count}.'
)
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__('Dice "faces_items" argsument need to be iterable.')
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_items" count need to be greater or equal to {min_count}.'
)
class DiceBoxWrongItemAdditionError(Exception):
def __init__(self):
super().__init__('Dice instance expected.')
<|reserved_special_token_1|>
class DiceEmptyInialItemsError(Exception):
def __init__(self):
super().__init__(
'To dice creation whether "faces_count" or "faces_items" argsuments need to be passed.'
)
class DiceWrongFacesCountTypeError(Exception):
def __init__(self):
super().__init__('Dice "faces_count" argsument type need to be "int".')
class DiceWrongFacesCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_count" argsument need to be greater or equal to {min_count}.'
)
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__('Dice "faces_items" argsument need to be iterable.')
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(
f'Dice "faces_items" count need to be greater or equal to {min_count}.'
)
class DiceBoxWrongItemAdditionError(Exception):
def __init__(self):
super().__init__('Dice instance expected.')
<|reserved_special_token_1|>
# Copyright 2021 Yegor Bitensky
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DiceEmptyInialItemsError(Exception):
def __init__(self):
super().__init__(
"To dice creation "
"whether \"faces_count\" or \"faces_items\" "
"argsuments need to be passed."
)
class DiceWrongFacesCountTypeError(Exception):
def __init__(self):
super().__init__("Dice \"faces_count\" argsument type need to be \"int\".")
class DiceWrongFacesCountError(Exception):
def __init__(self, min_count):
super().__init__(f"Dice \"faces_count\" argsument need to be greater or equal to {min_count}.")
class DiceWrongFacesItemsTypeError(Exception):
def __init__(self):
super().__init__("Dice \"faces_items\" argsument need to be iterable.")
class DiceWrongFacesItemsCountError(Exception):
def __init__(self, min_count):
super().__init__(f"Dice \"faces_items\" count need to be greater or equal to {min_count}.")
class DiceBoxWrongItemAdditionError(Exception):
    """Raised when something other than a Dice instance is added to a dice box."""

    def __init__(self):
        message = "Dice instance expected."
        super().__init__(message)
|
flexible
|
{
"blob_id": "5750fd4b59f75ea63b4214ee66b23602ed4d314d",
"index": 8909,
"step-1": "<mask token>\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-2": "<mask token>\n\n\nclass DiceWrongFacesCountError(Exception):\n <mask token>\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-3": "<mask token>\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n <mask token>\n\n\nclass DiceWrongFacesCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_count\" argsument need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-4": "class DiceEmptyInialItemsError(Exception):\n\n def __init__(self):\n super().__init__(\n 'To dice creation whether \"faces_count\" or \"faces_items\" argsuments need to be passed.'\n )\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_count\" argsument type need to be \"int\".')\n\n\nclass DiceWrongFacesCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_count\" argsument need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n\n def __init__(self):\n super().__init__('Dice \"faces_items\" argsument need to be iterable.')\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n\n def __init__(self, min_count):\n super().__init__(\n f'Dice \"faces_items\" count need to be greater or equal to {min_count}.'\n )\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n\n def __init__(self):\n super().__init__('Dice instance expected.')\n",
"step-5": "# Copyright 2021 Yegor Bitensky\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass DiceEmptyInialItemsError(Exception):\n def __init__(self):\n super().__init__(\n \"To dice creation \"\n \"whether \\\"faces_count\\\" or \\\"faces_items\\\" \"\n \"argsuments need to be passed.\"\n )\n\n\nclass DiceWrongFacesCountTypeError(Exception):\n def __init__(self):\n super().__init__(\"Dice \\\"faces_count\\\" argsument type need to be \\\"int\\\".\")\n\n\nclass DiceWrongFacesCountError(Exception):\n def __init__(self, min_count):\n super().__init__(f\"Dice \\\"faces_count\\\" argsument need to be greater or equal to {min_count}.\")\n\n\nclass DiceWrongFacesItemsTypeError(Exception):\n def __init__(self):\n super().__init__(\"Dice \\\"faces_items\\\" argsument need to be iterable.\")\n\n\nclass DiceWrongFacesItemsCountError(Exception):\n def __init__(self, min_count):\n super().__init__(f\"Dice \\\"faces_items\\\" count need to be greater or equal to {min_count}.\")\n\n\nclass DiceBoxWrongItemAdditionError(Exception):\n def __init__(self):\n super().__init__(\"Dice instance expected.\")\n",
"step-ids": [
6,
7,
9,
12,
13
]
}
|
[
6,
7,
9,
12,
13
] |
# Generated by Django 2.2.6 on 2020-04-06 16:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the attendance app: the `user` and `repos` tables.

    NOTE(review): auto-generated by Django; field definitions must stay in
    sync with the app models, so only layout and comments differ here.
    """

    initial = True
    dependencies = []

    operations = [
        migrations.CreateModel(
            name='User',
            options={'db_table': 'user'},
            fields=[
                ('user_id', models.IntegerField(primary_key=True, serialize=False)),
                ('username', models.CharField(max_length=45)),
                ('userlogin', models.CharField(max_length=45)),
                # NOTE(review): "avartar_url" is misspelled, but renaming the
                # column requires a follow-up schema migration.
                ('avartar_url', models.CharField(blank=True, max_length=150, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Repos',
            options={'db_table': 'repos'},
            fields=[
                ('repo_id', models.IntegerField(primary_key=True, serialize=False)),
                ('reponame', models.CharField(max_length=150)),
                ('owner', models.CharField(max_length=45)),
                # FK to User; deleting a user does not cascade (DO_NOTHING).
                ('user', models.ForeignKey(to='attendance.User', on_delete=django.db.models.deletion.DO_NOTHING)),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "1b71789ba7c2191b433a405723fe6c985c926610",
"index": 8620,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='User', fields=[('user_id',\n models.IntegerField(primary_key=True, serialize=False)), (\n 'username', models.CharField(max_length=45)), ('userlogin', models.\n CharField(max_length=45)), ('avartar_url', models.CharField(blank=\n True, max_length=150, null=True))], options={'db_table': 'user'}),\n migrations.CreateModel(name='Repos', fields=[('repo_id', models.\n IntegerField(primary_key=True, serialize=False)), ('reponame',\n models.CharField(max_length=150)), ('owner', models.CharField(\n max_length=45)), ('user', models.ForeignKey(on_delete=django.db.\n models.deletion.DO_NOTHING, to='attendance.User'))], options={\n 'db_table': 'repos'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='User', fields=[('user_id',\n models.IntegerField(primary_key=True, serialize=False)), (\n 'username', models.CharField(max_length=45)), ('userlogin', models.\n CharField(max_length=45)), ('avartar_url', models.CharField(blank=\n True, max_length=150, null=True))], options={'db_table': 'user'}),\n migrations.CreateModel(name='Repos', fields=[('repo_id', models.\n IntegerField(primary_key=True, serialize=False)), ('reponame',\n models.CharField(max_length=150)), ('owner', models.CharField(\n max_length=45)), ('user', models.ForeignKey(on_delete=django.db.\n models.deletion.DO_NOTHING, to='attendance.User'))], options={\n 'db_table': 'repos'})]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-04-06 16:47\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='User',\r\n fields=[\r\n ('user_id', models.IntegerField(primary_key=True, serialize=False)),\r\n ('username', models.CharField(max_length=45)),\r\n ('userlogin', models.CharField(max_length=45)),\r\n ('avartar_url', models.CharField(blank=True, max_length=150, null=True)),\r\n ],\r\n options={\r\n 'db_table': 'user',\r\n },\r\n ),\r\n migrations.CreateModel(\r\n name='Repos',\r\n fields=[\r\n ('repo_id', models.IntegerField(primary_key=True, serialize=False)),\r\n ('reponame', models.CharField(max_length=150)),\r\n ('owner', models.CharField(max_length=45)),\r\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='attendance.User')),\r\n ],\r\n options={\r\n 'db_table': 'repos',\r\n },\r\n ),\r\n ]\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('projects', '0001_initial'), ('users',
'0003_user_projects')]
operations = [migrations.RemoveField(model_name='user', name='projects'
), migrations.AddField(model_name='user', name='projects', field=
models.ManyToManyField(related_name='projects', to='projects.Project'))
]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('projects', '0001_initial'), ('users',
'0003_user_projects')]
operations = [migrations.RemoveField(model_name='user', name='projects'
), migrations.AddField(model_name='user', name='projects', field=
models.ManyToManyField(related_name='projects', to='projects.Project'))
]
<|reserved_special_token_1|>
# Generated by Django 2.0.13 on 2019-05-23 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replaces the `projects` field on `user` with a ManyToManyField.

    NOTE(review): auto-generated by Django; the field must be removed before
    being re-added with the new definition.
    """

    dependencies = [
        ('projects', '0001_initial'),
        ('users', '0003_user_projects'),
    ]

    operations = [
        # Drop the old field first, then add the M2M replacement.
        migrations.RemoveField(model_name='user', name='projects'),
        migrations.AddField(
            model_name='user',
            name='projects',
            field=models.ManyToManyField(to='projects.Project', related_name='projects'),
        ),
    ]
|
flexible
|
{
"blob_id": "547935a67fb079e551534126534234ceb96ed0dd",
"index": 7648,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0001_initial'), ('users',\n '0003_user_projects')]\n operations = [migrations.RemoveField(model_name='user', name='projects'\n ), migrations.AddField(model_name='user', name='projects', field=\n models.ManyToManyField(related_name='projects', to='projects.Project'))\n ]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('projects', '0001_initial'), ('users',\n '0003_user_projects')]\n operations = [migrations.RemoveField(model_name='user', name='projects'\n ), migrations.AddField(model_name='user', name='projects', field=\n models.ManyToManyField(related_name='projects', to='projects.Project'))\n ]\n",
"step-5": "# Generated by Django 2.0.13 on 2019-05-23 14:12\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0001_initial'),\n ('users', '0003_user_projects'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='user',\n name='projects',\n ),\n migrations.AddField(\n model_name='user',\n name='projects',\n field=models.ManyToManyField(related_name='projects', to='projects.Project'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Functions for parsing various strings to RGB tuples."""
import json
import re
from pathlib import Path
import importlib.resources as resources
from pilutils.basic import hex_to_rgb
# Public API of this module: individual format parsers plus the universal parse().
__all__ = [
    "parse_hex6",
    "parse_hex3",
    "parse_rgbfunc_int",
    "parse_rgbfunc_float",
    "parse_rgbfunc_percent",
    "parse_name_css",
    "parse_name_crayola",
    "parse_name_xkcd",
    "parse_name_meodai_best",
    "parse_name_meodai",
    "parse",
]

# Name lookup tables, loaded once at import time from the JSON datasets bundled
# in the pilutils.colornames package. Keys are lowercase color names; values are
# 6-digit hex strings (they are fed to parse_hex6 by the parse_name_* helpers).
_css_names = json.loads(resources.read_text("pilutils.colornames", "css.json"))
_crayola_names = json.loads(resources.read_text("pilutils.colornames", "crayola.json"))
_xkcd_names = json.loads(resources.read_text("pilutils.colornames", "xkcd.json"))
_meodai_best_names = json.loads(
    resources.read_text("pilutils.colornames", "meodai-best.json")
)
_meodai_names = json.loads(resources.read_text("pilutils.colornames", "meodai.json"))
def parse_hex6(hex6):
    """Parse a 6-digit hex color string such as ``#ab34df`` to an RGB tuple.

    The leading ``#`` is optional and surrounding whitespace is ignored.
    Raises ValueError if the string is not a 6-digit hex color.
    """
    match = re.match(r"^#?([0-9A-Fa-f]{6})$", hex6.strip())
    if match is None:
        raise ValueError(f"String {hex6!r} does not match hex6 format.")
    return hex_to_rgb(int(match.group(1), 16))
def parse_hex3(hex3):
    """Parse a 3-digit hex color string such as ``#a3d`` to an RGB tuple.

    Each digit is doubled, so ``#a3d`` means ``#aa33dd``. The leading ``#``
    is optional and surrounding whitespace is ignored.
    Raises ValueError if the string is not a 3-digit hex color.
    """
    match = re.match(r"^#?([0-9A-Fa-f]{3})$", hex3.strip())
    if match is None:
        raise ValueError(f"String {hex3!r} does not match hex3 format.")
    digits = match.group(1)
    return tuple(int(ch + ch, 16) for ch in digits)
def parse_rgbfunc_int(rgbfunc):
    """Parse an integer ``rgb()`` string such as ``rgb(171, 52, 223)``.

    Each channel must be an integer in 0-255.
    Raises ValueError if the string does not match or a channel is out of range.
    """
    pattern = r"^rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)$"
    match = re.match(pattern, rgbfunc.strip())
    if match is not None:
        channels = tuple(int(group) for group in match.groups())
        if max(channels) <= 255:
            return channels
    raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_int format.")
def parse_rgbfunc_float(rgbfunc):
    """Parse a float ``rgb()`` string such as ``rgb(0.67, 0.2, 0.87)``.

    Each channel must be written with a decimal point (``0.x`` or ``1.x``)
    and lie in [0, 1]; channels are scaled to 0-255 and rounded.
    Raises ValueError if the string does not match or a channel exceeds 1.
    """
    pattern = r"^rgb\(\s*([01]\.\d+)\s*,\s*([01]\.\d+)\s*,\s*([01]\.\d+)\s*\)$"
    match = re.match(pattern, rgbfunc.strip())
    if match is not None:
        channels = tuple(float(group) for group in match.groups())
        if max(channels) <= 1:
            return tuple(int(round(c * 255)) for c in channels)
    raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_float format.")
def parse_rgbfunc_percent(rgbfunc):
    """Parse a percentage ``rgb()`` string such as ``rgb(67%, 20%, 87.5%)``.

    Each channel must be 0-100 percent (decimals allowed); channels are
    scaled to 0-255 and rounded.
    Raises ValueError if the string does not match or a channel exceeds 100.
    """
    pattern = (
        r"^rgb\(\s*(\d{1,3}(?:\.\d+)?)%\s*,"
        r"\s*(\d{1,3}(?:\.\d+)?)%\s*,"
        r"\s*(\d{1,3}(?:\.\d+)?)%\s*\)$"
    )
    match = re.match(pattern, rgbfunc.strip())
    if match is not None:
        channels = tuple(float(group) for group in match.groups())
        if max(channels) <= 100:
            return tuple(int(round(c * 255 / 100)) for c in channels)
    raise ValueError(f"String {rgbfunc!r} does not match rgbfunc_percent format.")
def parse_name_css(name):
    """Look up a CSS color name (case-insensitive) and return its RGB tuple.

    Raises ValueError if the name is not in the CSS dataset.
    """
    key = name.lower()
    if key in _css_names:
        return parse_hex6(_css_names[key])
    raise ValueError(f"Color {key!r} is not named in the CSS dataset.")
def parse_name_crayola(name):
    """Look up a crayola color name (case-insensitive) and return its RGB tuple.

    Raises ValueError if the name is not in the crayola dataset.
    """
    key = name.lower()
    if key in _crayola_names:
        return parse_hex6(_crayola_names[key])
    raise ValueError(f"Color {key!r} is not named in the crayola dataset.")
def parse_name_xkcd(name):
    """Look up an xkcd color name (case-insensitive) and return its RGB tuple.

    Raises ValueError if the name is not in the xkcd dataset.
    """
    key = name.lower()
    if key in _xkcd_names:
        return parse_hex6(_xkcd_names[key])
    raise ValueError(f"Color {key!r} is not named in the xkcd dataset.")
def parse_name_meodai_best(name):
    """Look up a meodai-best color name (case-insensitive) and return its RGB tuple.

    Raises ValueError if the name is not in the meodai-best dataset.
    """
    key = name.lower()
    if key in _meodai_best_names:
        return parse_hex6(_meodai_best_names[key])
    raise ValueError(f"Color {key!r} is not named in the meodai-best dataset.")
def parse_name_meodai(name):
    """Look up a meodai color name (case-insensitive) and return its RGB tuple.

    Raises ValueError if the name is not in the meodai dataset.
    """
    key = name.lower()
    if key in _meodai_names:
        return parse_hex6(_meodai_names[key])
    raise ValueError(f"Color {key!r} is not named in the meodai dataset.")
def parse(
    colstr,
    *,
    hex6=True,
    hex3=True,
    rgbfunc_int=True,
    rgbfunc_float=True,
    rgbfunc_percent=True,
    name_css=True,
    name_crayola=True,
    name_xkcd=True,
    name_meodai_best=True,
    name_meodai=True,
):
    """Combine all other parse functions into one "universal" function.

    Parsers are tried in a fixed priority order (hex formats, rgb() formats,
    then the name datasets from CSS down to meodai) and the FIRST parser that
    accepts *colstr* wins. Use the keyword flags to disable individual parsers.

    Raises ValueError if no enabled parser accepts *colstr*.

    BUG FIX: the previous version kept looping after a successful parse, so
    the LAST matching parser silently overrode earlier ones (a name present in
    several datasets resolved to the meodai value rather than the CSS one) and
    every remaining parser still ran. It now returns on the first success.
    """
    # (parser, enabled) pairs in priority order.
    candidates = [
        (parse_hex6, hex6),
        (parse_hex3, hex3),
        (parse_rgbfunc_int, rgbfunc_int),
        (parse_rgbfunc_float, rgbfunc_float),
        (parse_rgbfunc_percent, rgbfunc_percent),
        (parse_name_css, name_css),
        (parse_name_crayola, name_crayola),
        (parse_name_xkcd, name_xkcd),
        (parse_name_meodai_best, name_meodai_best),
        (parse_name_meodai, name_meodai),
    ]
    for func, enabled in candidates:
        if not enabled:
            continue
        try:
            return func(colstr)  # first successful parser wins
        except ValueError:
            pass
    raise ValueError(f"Could not find a working parser for {colstr!r}.")
|
normal
|
{
"blob_id": "978f3979aee1c4361483fd61b54352e7fff8d3b3",
"index": 697,
"step-1": "<mask token>\n\n\ndef parse_hex3(hex3):\n \"\"\"Example: #a3d\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):\n h3 = m.group(1)\n return tuple(int(c * 2, 16) for c in h3)\n raise ValueError(f'String {hex3!r} does not match hex3 format.')\n\n\n<mask token>\n\n\ndef parse_rgbfunc_float(rgbfunc):\n \"\"\"Example: rgb(0.67, 0.2, 0.87)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 1 for n in t):\n return tuple(int(round(n * 255)) for n in t)\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'\n )\n\n\ndef parse_rgbfunc_percent(rgbfunc):\n \"\"\"Example: rgb(67%, 20%, 87.5%)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 100 for n in t):\n return tuple(int(round(n * 255 / 100)) for n in t)\n raise ValueError(\n f'String {rgbfunc!r} does not match rgbfunc_percent format.')\n\n\n<mask token>\n\n\ndef parse_name_crayola(name):\n name = name.lower()\n if name not in _crayola_names:\n raise ValueError(f'Color {name!r} is not named in the crayola dataset.'\n )\n return parse_hex6(_crayola_names[name])\n\n\n<mask token>\n\n\ndef parse_name_meodai_best(name):\n name = name.lower()\n if name not in _meodai_best_names:\n raise ValueError(\n f'Color {name!r} is not named in the meodai-best dataset.')\n return parse_hex6(_meodai_best_names[name])\n\n\ndef parse_name_meodai(name):\n name = name.lower()\n if name not in _meodai_names:\n raise ValueError(f'Color {name!r} is not named in the meodai dataset.')\n return parse_hex6(_meodai_names[name])\n\n\ndef parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=\n True, rgbfunc_percent=True, name_css=True, 
name_crayola=True, name_xkcd\n =True, name_meodai_best=True, name_meodai=True):\n \"\"\"Combine all other parse functions into one \"universal\" function. Use kwargs to disable certain parsers.\"\"\"\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f'Could not find a working parser for {colstr!r}.')\n return res\n",
"step-2": "<mask token>\n\n\ndef parse_hex6(hex6):\n \"\"\"Example: #ab34df\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):\n h = int(m.group(1), 16)\n return hex_to_rgb(h)\n raise ValueError(f'String {hex6!r} does not match hex6 format.')\n\n\ndef parse_hex3(hex3):\n \"\"\"Example: #a3d\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):\n h3 = m.group(1)\n return tuple(int(c * 2, 16) for c in h3)\n raise ValueError(f'String {hex3!r} does not match hex3 format.')\n\n\ndef parse_rgbfunc_int(rgbfunc):\n \"\"\"Example: rgb(171, 52, 223)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*\\\\)$',\n rgbfunc.strip())):\n t = tuple(map(int, m.groups()))\n if not any(n > 255 for n in t):\n return t\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')\n\n\ndef parse_rgbfunc_float(rgbfunc):\n \"\"\"Example: rgb(0.67, 0.2, 0.87)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 1 for n in t):\n return tuple(int(round(n * 255)) for n in t)\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'\n )\n\n\ndef parse_rgbfunc_percent(rgbfunc):\n \"\"\"Example: rgb(67%, 20%, 87.5%)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 100 for n in t):\n return tuple(int(round(n * 255 / 100)) for n in t)\n raise ValueError(\n f'String {rgbfunc!r} does not match rgbfunc_percent format.')\n\n\ndef parse_name_css(name):\n name = name.lower()\n if name not in _css_names:\n raise ValueError(f'Color {name!r} is not named in the CSS dataset.')\n return parse_hex6(_css_names[name])\n\n\ndef 
parse_name_crayola(name):\n name = name.lower()\n if name not in _crayola_names:\n raise ValueError(f'Color {name!r} is not named in the crayola dataset.'\n )\n return parse_hex6(_crayola_names[name])\n\n\n<mask token>\n\n\ndef parse_name_meodai_best(name):\n name = name.lower()\n if name not in _meodai_best_names:\n raise ValueError(\n f'Color {name!r} is not named in the meodai-best dataset.')\n return parse_hex6(_meodai_best_names[name])\n\n\ndef parse_name_meodai(name):\n name = name.lower()\n if name not in _meodai_names:\n raise ValueError(f'Color {name!r} is not named in the meodai dataset.')\n return parse_hex6(_meodai_names[name])\n\n\ndef parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=\n True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd\n =True, name_meodai_best=True, name_meodai=True):\n \"\"\"Combine all other parse functions into one \"universal\" function. Use kwargs to disable certain parsers.\"\"\"\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f'Could not find a working parser for {colstr!r}.')\n return res\n",
"step-3": "<mask token>\n__all__ = ['parse_hex6', 'parse_hex3', 'parse_rgbfunc_int',\n 'parse_rgbfunc_float', 'parse_rgbfunc_percent', 'parse_name_css',\n 'parse_name_crayola', 'parse_name_xkcd', 'parse_name_meodai_best',\n 'parse_name_meodai', 'parse']\n_css_names = json.loads(resources.read_text('pilutils.colornames', 'css.json'))\n_crayola_names = json.loads(resources.read_text('pilutils.colornames',\n 'crayola.json'))\n_xkcd_names = json.loads(resources.read_text('pilutils.colornames',\n 'xkcd.json'))\n_meodai_best_names = json.loads(resources.read_text('pilutils.colornames',\n 'meodai-best.json'))\n_meodai_names = json.loads(resources.read_text('pilutils.colornames',\n 'meodai.json'))\n\n\ndef parse_hex6(hex6):\n \"\"\"Example: #ab34df\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):\n h = int(m.group(1), 16)\n return hex_to_rgb(h)\n raise ValueError(f'String {hex6!r} does not match hex6 format.')\n\n\ndef parse_hex3(hex3):\n \"\"\"Example: #a3d\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):\n h3 = m.group(1)\n return tuple(int(c * 2, 16) for c in h3)\n raise ValueError(f'String {hex3!r} does not match hex3 format.')\n\n\ndef parse_rgbfunc_int(rgbfunc):\n \"\"\"Example: rgb(171, 52, 223)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*\\\\)$',\n rgbfunc.strip())):\n t = tuple(map(int, m.groups()))\n if not any(n > 255 for n in t):\n return t\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')\n\n\ndef parse_rgbfunc_float(rgbfunc):\n \"\"\"Example: rgb(0.67, 0.2, 0.87)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 1 for n in t):\n return tuple(int(round(n * 255)) for n in t)\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'\n )\n\n\ndef 
parse_rgbfunc_percent(rgbfunc):\n \"\"\"Example: rgb(67%, 20%, 87.5%)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 100 for n in t):\n return tuple(int(round(n * 255 / 100)) for n in t)\n raise ValueError(\n f'String {rgbfunc!r} does not match rgbfunc_percent format.')\n\n\ndef parse_name_css(name):\n name = name.lower()\n if name not in _css_names:\n raise ValueError(f'Color {name!r} is not named in the CSS dataset.')\n return parse_hex6(_css_names[name])\n\n\ndef parse_name_crayola(name):\n name = name.lower()\n if name not in _crayola_names:\n raise ValueError(f'Color {name!r} is not named in the crayola dataset.'\n )\n return parse_hex6(_crayola_names[name])\n\n\ndef parse_name_xkcd(name):\n name = name.lower()\n if name not in _xkcd_names:\n raise ValueError(f'Color {name!r} is not named in the xkcd dataset.')\n return parse_hex6(_xkcd_names[name])\n\n\ndef parse_name_meodai_best(name):\n name = name.lower()\n if name not in _meodai_best_names:\n raise ValueError(\n f'Color {name!r} is not named in the meodai-best dataset.')\n return parse_hex6(_meodai_best_names[name])\n\n\ndef parse_name_meodai(name):\n name = name.lower()\n if name not in _meodai_names:\n raise ValueError(f'Color {name!r} is not named in the meodai dataset.')\n return parse_hex6(_meodai_names[name])\n\n\ndef parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=\n True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd\n =True, name_meodai_best=True, name_meodai=True):\n \"\"\"Combine all other parse functions into one \"universal\" function. 
Use kwargs to disable certain parsers.\"\"\"\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f'Could not find a working parser for {colstr!r}.')\n return res\n",
"step-4": "<mask token>\nimport json\nimport re\nfrom pathlib import Path\nimport importlib.resources as resources\nfrom pilutils.basic import hex_to_rgb\n__all__ = ['parse_hex6', 'parse_hex3', 'parse_rgbfunc_int',\n 'parse_rgbfunc_float', 'parse_rgbfunc_percent', 'parse_name_css',\n 'parse_name_crayola', 'parse_name_xkcd', 'parse_name_meodai_best',\n 'parse_name_meodai', 'parse']\n_css_names = json.loads(resources.read_text('pilutils.colornames', 'css.json'))\n_crayola_names = json.loads(resources.read_text('pilutils.colornames',\n 'crayola.json'))\n_xkcd_names = json.loads(resources.read_text('pilutils.colornames',\n 'xkcd.json'))\n_meodai_best_names = json.loads(resources.read_text('pilutils.colornames',\n 'meodai-best.json'))\n_meodai_names = json.loads(resources.read_text('pilutils.colornames',\n 'meodai.json'))\n\n\ndef parse_hex6(hex6):\n \"\"\"Example: #ab34df\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{6})$', hex6.strip())):\n h = int(m.group(1), 16)\n return hex_to_rgb(h)\n raise ValueError(f'String {hex6!r} does not match hex6 format.')\n\n\ndef parse_hex3(hex3):\n \"\"\"Example: #a3d\"\"\"\n if (m := re.match('^#?([0-9A-Fa-f]{3})$', hex3.strip())):\n h3 = m.group(1)\n return tuple(int(c * 2, 16) for c in h3)\n raise ValueError(f'String {hex3!r} does not match hex3 format.')\n\n\ndef parse_rgbfunc_int(rgbfunc):\n \"\"\"Example: rgb(171, 52, 223)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*,\\\\s*(\\\\d{1,3})\\\\s*\\\\)$',\n rgbfunc.strip())):\n t = tuple(map(int, m.groups()))\n if not any(n > 255 for n in t):\n return t\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_int format.')\n\n\ndef parse_rgbfunc_float(rgbfunc):\n \"\"\"Example: rgb(0.67, 0.2, 0.87)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*,\\\\s*([01]\\\\.\\\\d+)\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 1 for n in t):\n return 
tuple(int(round(n * 255)) for n in t)\n raise ValueError(f'String {rgbfunc!r} does not match rgbfunc_float format.'\n )\n\n\ndef parse_rgbfunc_percent(rgbfunc):\n \"\"\"Example: rgb(67%, 20%, 87.5%)\"\"\"\n if (m := re.match(\n '^rgb\\\\(\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*,\\\\s*(\\\\d{1,3}(?:\\\\.\\\\d+)?)%\\\\s*\\\\)$'\n , rgbfunc.strip())):\n t = tuple(map(float, m.groups()))\n if not any(n > 100 for n in t):\n return tuple(int(round(n * 255 / 100)) for n in t)\n raise ValueError(\n f'String {rgbfunc!r} does not match rgbfunc_percent format.')\n\n\ndef parse_name_css(name):\n name = name.lower()\n if name not in _css_names:\n raise ValueError(f'Color {name!r} is not named in the CSS dataset.')\n return parse_hex6(_css_names[name])\n\n\ndef parse_name_crayola(name):\n name = name.lower()\n if name not in _crayola_names:\n raise ValueError(f'Color {name!r} is not named in the crayola dataset.'\n )\n return parse_hex6(_crayola_names[name])\n\n\ndef parse_name_xkcd(name):\n name = name.lower()\n if name not in _xkcd_names:\n raise ValueError(f'Color {name!r} is not named in the xkcd dataset.')\n return parse_hex6(_xkcd_names[name])\n\n\ndef parse_name_meodai_best(name):\n name = name.lower()\n if name not in _meodai_best_names:\n raise ValueError(\n f'Color {name!r} is not named in the meodai-best dataset.')\n return parse_hex6(_meodai_best_names[name])\n\n\ndef parse_name_meodai(name):\n name = name.lower()\n if name not in _meodai_names:\n raise ValueError(f'Color {name!r} is not named in the meodai dataset.')\n return parse_hex6(_meodai_names[name])\n\n\ndef parse(colstr, *, hex6=True, hex3=True, rgbfunc_int=True, rgbfunc_float=\n True, rgbfunc_percent=True, name_css=True, name_crayola=True, name_xkcd\n =True, name_meodai_best=True, name_meodai=True):\n \"\"\"Combine all other parse functions into one \"universal\" function. 
Use kwargs to disable certain parsers.\"\"\"\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f'Could not find a working parser for {colstr!r}.')\n return res\n",
"step-5": "\"\"\"Functions for parsing various strings to RGB tuples.\"\"\"\nimport json\nimport re\nfrom pathlib import Path\nimport importlib.resources as resources\n\nfrom pilutils.basic import hex_to_rgb\n\n__all__ = [\n \"parse_hex6\",\n \"parse_hex3\",\n \"parse_rgbfunc_int\",\n \"parse_rgbfunc_float\",\n \"parse_rgbfunc_percent\",\n \"parse_name_css\",\n \"parse_name_crayola\",\n \"parse_name_xkcd\",\n \"parse_name_meodai_best\",\n \"parse_name_meodai\",\n \"parse\",\n]\n\n_css_names = json.loads(resources.read_text(\"pilutils.colornames\", \"css.json\"))\n_crayola_names = json.loads(resources.read_text(\"pilutils.colornames\", \"crayola.json\"))\n_xkcd_names = json.loads(resources.read_text(\"pilutils.colornames\", \"xkcd.json\"))\n_meodai_best_names = json.loads(\n resources.read_text(\"pilutils.colornames\", \"meodai-best.json\")\n)\n_meodai_names = json.loads(resources.read_text(\"pilutils.colornames\", \"meodai.json\"))\n\n\ndef parse_hex6(hex6):\n \"\"\"Example: #ab34df\"\"\"\n if m := re.match(r\"^#?([0-9A-Fa-f]{6})$\", hex6.strip()):\n h = int(m.group(1), 16)\n return hex_to_rgb(h)\n raise ValueError(f\"String {hex6!r} does not match hex6 format.\")\n\n\ndef parse_hex3(hex3):\n \"\"\"Example: #a3d\"\"\"\n if m := re.match(r\"^#?([0-9A-Fa-f]{3})$\", hex3.strip()):\n h3 = m.group(1)\n return tuple(int(c * 2, 16) for c in h3)\n raise ValueError(f\"String {hex3!r} does not match hex3 format.\")\n\n\ndef parse_rgbfunc_int(rgbfunc):\n \"\"\"Example: rgb(171, 52, 223)\"\"\"\n if m := re.match(\n r\"^rgb\\(\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*,\\s*(\\d{1,3})\\s*\\)$\", rgbfunc.strip()\n ):\n t = tuple(map(int, m.groups()))\n if not any(n > 255 for n in t):\n return t\n raise ValueError(f\"String {rgbfunc!r} does not match rgbfunc_int format.\")\n\n\ndef parse_rgbfunc_float(rgbfunc):\n \"\"\"Example: rgb(0.67, 0.2, 0.87)\"\"\"\n if m := re.match(\n r\"^rgb\\(\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*,\\s*([01]\\.\\d+)\\s*\\)$\",\n rgbfunc.strip(),\n ):\n t = 
tuple(map(float, m.groups()))\n if not any(n > 1 for n in t):\n return tuple(int(round(n * 255)) for n in t)\n raise ValueError(f\"String {rgbfunc!r} does not match rgbfunc_float format.\")\n\n\ndef parse_rgbfunc_percent(rgbfunc):\n \"\"\"Example: rgb(67%, 20%, 87.5%)\"\"\"\n if m := re.match(\n r\"^rgb\\(\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*,\\s*(\\d{1,3}(?:\\.\\d+)?)%\\s*\\)$\",\n rgbfunc.strip(),\n ):\n t = tuple(map(float, m.groups()))\n if not any(n > 100 for n in t):\n return tuple(int(round(n * 255 / 100)) for n in t)\n raise ValueError(f\"String {rgbfunc!r} does not match rgbfunc_percent format.\")\n\n\ndef parse_name_css(name):\n name = name.lower()\n if name not in _css_names:\n raise ValueError(f\"Color {name!r} is not named in the CSS dataset.\")\n return parse_hex6(_css_names[name])\n\n\ndef parse_name_crayola(name):\n name = name.lower()\n if name not in _crayola_names:\n raise ValueError(f\"Color {name!r} is not named in the crayola dataset.\")\n return parse_hex6(_crayola_names[name])\n\n\ndef parse_name_xkcd(name):\n name = name.lower()\n if name not in _xkcd_names:\n raise ValueError(f\"Color {name!r} is not named in the xkcd dataset.\")\n return parse_hex6(_xkcd_names[name])\n\n\ndef parse_name_meodai_best(name):\n name = name.lower()\n if name not in _meodai_best_names:\n raise ValueError(f\"Color {name!r} is not named in the meodai-best dataset.\")\n return parse_hex6(_meodai_best_names[name])\n\n\ndef parse_name_meodai(name):\n name = name.lower()\n if name not in _meodai_names:\n raise ValueError(f\"Color {name!r} is not named in the meodai dataset.\")\n return parse_hex6(_meodai_names[name])\n\n\ndef parse(\n colstr,\n *,\n hex6=True,\n hex3=True,\n rgbfunc_int=True,\n rgbfunc_float=True,\n rgbfunc_percent=True,\n name_css=True,\n name_crayola=True,\n name_xkcd=True,\n name_meodai_best=True,\n name_meodai=True,\n):\n \"\"\"Combine all other parse functions into one \"universal\" function. 
Use kwargs to disable certain parsers.\"\"\"\n funcs = []\n if hex6:\n funcs.append(parse_hex6)\n if hex3:\n funcs.append(parse_hex3)\n if rgbfunc_int:\n funcs.append(parse_rgbfunc_int)\n if rgbfunc_float:\n funcs.append(parse_rgbfunc_float)\n if rgbfunc_percent:\n funcs.append(parse_rgbfunc_percent)\n if name_css:\n funcs.append(parse_name_css)\n if name_crayola:\n funcs.append(parse_name_crayola)\n if name_xkcd:\n funcs.append(parse_name_xkcd)\n if name_meodai_best:\n funcs.append(parse_name_meodai_best)\n if name_meodai:\n funcs.append(parse_name_meodai)\n\n res = None\n for func in funcs:\n try:\n res = func(colstr)\n except ValueError:\n pass\n if res is None:\n raise ValueError(f\"Could not find a working parser for {colstr!r}.\")\n return res\n",
"step-ids": [
7,
10,
12,
13,
14
]
}
|
[
7,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
class GANLoss(nn.Module):
<|reserved_special_token_0|>
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,
target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode
)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'
] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.0
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GANLoss(nn.Module):
<|reserved_special_token_0|>
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,
target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode
)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'
] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.0
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
def __call__(self, Dreal, Dfake):
"""Calculate loss given Discriminator's output and grount truth labels."""
if self.which_net == 'G':
return self.G_loss(Dreal, Dfake)
elif self.which_net == 'D':
return self.D_loss(Dreal, Dfake)
else:
raise NotImplementedError(
'which_net name [%s] is not recognized' % self.which_net)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GANLoss(nn.Module):
"""Define different GAN Discriminator's objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,
target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode
)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'
] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.0
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
def __call__(self, Dreal, Dfake):
"""Calculate loss given Discriminator's output and grount truth labels."""
if self.which_net == 'G':
return self.G_loss(Dreal, Dfake)
elif self.which_net == 'D':
return self.D_loss(Dreal, Dfake)
else:
raise NotImplementedError(
'which_net name [%s] is not recognized' % self.which_net)
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import config as cfg
class GANLoss(nn.Module):
"""Define different GAN Discriminator's objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,
target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode
)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'
] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' %
self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.0
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError(
'loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
def __call__(self, Dreal, Dfake):
"""Calculate loss given Discriminator's output and grount truth labels."""
if self.which_net == 'G':
return self.G_loss(Dreal, Dfake)
elif self.which_net == 'D':
return self.D_loss(Dreal, Dfake)
else:
raise NotImplementedError(
'which_net name [%s] is not recognized' % self.which_net)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : gan_loss.py
# @Time : Created at 2019-07-11
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import config as cfg
class GANLoss(nn.Module):
"""Define different GAN Discriminator's objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0, target_fake_label=0.0, CUDA=False):
""" Initialize the GAN's Discriminator Loss class.
Parameters:
loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.loss_mode = loss_mode
self.which_net = which_net
self.which_D = which_D
self.gpu = CUDA
if loss_mode == 'lsgan':
self.loss = nn.MSELoss()
elif loss_mode in ['vanilla', 'ragan', 'rsgan']:
self.loss = nn.BCEWithLogitsLoss()
elif loss_mode in ['wgan', 'hinge']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % loss_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - tpyically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
if self.gpu:
target_tensor = target_tensor.cuda()
return target_tensor.expand_as(prediction)
def G_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = real_tensor if self.loss_mode in ['vanilla'] else fake_tensor
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)
if self.loss_mode in ['lsgan', 'ragan']:
loss_fake = self.loss(prediction_fake, real_tensor)
loss_real = self.loss(prediction_real, fake_tensor)
g_loss = loss_fake + loss_real
elif self.loss_mode == 'vanilla':
loss_fake = -self.loss(prediction_fake, fake_tensor)
g_loss = loss_fake
elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':
loss_fake = -prediction_fake.mean()
loss_real = prediction_real.mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'hinge' and self.which_D == 'Ra':
loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()
loss_real = nn.ReLU()(1.0 + prediction_real).mean()
g_loss = loss_fake + loss_real
elif self.loss_mode == 'rsgan':
loss_fake = self.loss(Dfake - Dreal, real_tensor)
g_loss = loss_fake
else:
raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)
return g_loss
def D_loss(self, Dreal, Dfake):
if self.loss_mode != 'rsgan' and cfg.d_out_mean:
Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)
Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)
real_tensor = self.get_target_tensor(Dreal, True)
fake_tensor = self.get_target_tensor(Dreal, False)
if self.which_D == 'S':
prediction_fake = Dfake
prediction_real = Dreal
elif self.which_D == 'Ra':
prediction_fake = Dfake - torch.mean(Dreal)
prediction_real = Dreal - torch.mean(Dfake)
else:
raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)
if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:
loss_fake = self.loss(prediction_fake, fake_tensor)
loss_real = self.loss(prediction_real, real_tensor)
elif self.loss_mode == 'wgan':
loss_fake = prediction_fake.mean()
loss_real = -prediction_real.mean()
elif self.loss_mode == 'hinge':
loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()
loss_real = nn.ReLU()(1.0 - prediction_real).mean()
elif self.loss_mode == 'rsgan':
loss_fake = 0.
loss_real = self.loss(Dreal - Dfake, real_tensor)
else:
raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)
return loss_fake + loss_real
def __call__(self, Dreal, Dfake):
    """Calculate loss given the Discriminator's outputs and ground-truth labels.

    Routes to the generator objective when ``which_net`` is ``'G'`` and to the
    discriminator objective when it is ``'D'``.
    """
    if self.which_net == 'G':
        return self.G_loss(Dreal, Dfake)
    if self.which_net == 'D':
        return self.D_loss(Dreal, Dfake)
    raise NotImplementedError('which_net name [%s] is not recognized' % self.which_net)
|
flexible
|
{
"blob_id": "9cea998d7d5cad3ddc00f667ca06151a938d48a1",
"index": 9424,
"step-1": "<mask token>\n\n\nclass GANLoss(nn.Module):\n <mask token>\n\n def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,\n target_fake_label=0.0, CUDA=False):\n \"\"\" Initialize the GAN's Discriminator Loss class.\n\n Parameters:\n loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss_mode = loss_mode\n self.which_net = which_net\n self.which_D = which_D\n self.gpu = CUDA\n if loss_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif loss_mode in ['vanilla', 'ragan', 'rsgan']:\n self.loss = nn.BCEWithLogitsLoss()\n elif loss_mode in ['wgan', 'hinge']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % loss_mode\n )\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n if self.gpu:\n target_tensor = target_tensor.cuda()\n return target_tensor.expand_as(prediction)\n\n def G_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = 
self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = real_tensor if self.loss_mode in ['vanilla'\n ] else fake_tensor\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan']:\n loss_fake = self.loss(prediction_fake, real_tensor)\n loss_real = self.loss(prediction_real, fake_tensor)\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'vanilla':\n loss_fake = -self.loss(prediction_fake, fake_tensor)\n g_loss = loss_fake\n elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':\n loss_fake = -prediction_fake.mean()\n loss_real = prediction_real.mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'hinge' and self.which_D == 'Ra':\n loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 + prediction_real).mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'rsgan':\n loss_fake = self.loss(Dfake - Dreal, real_tensor)\n g_loss = loss_fake\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return g_loss\n\n def D_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = Dreal\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:\n 
loss_fake = self.loss(prediction_fake, fake_tensor)\n loss_real = self.loss(prediction_real, real_tensor)\n elif self.loss_mode == 'wgan':\n loss_fake = prediction_fake.mean()\n loss_real = -prediction_real.mean()\n elif self.loss_mode == 'hinge':\n loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 - prediction_real).mean()\n elif self.loss_mode == 'rsgan':\n loss_fake = 0.0\n loss_real = self.loss(Dreal - Dfake, real_tensor)\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return loss_fake + loss_real\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GANLoss(nn.Module):\n <mask token>\n\n def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,\n target_fake_label=0.0, CUDA=False):\n \"\"\" Initialize the GAN's Discriminator Loss class.\n\n Parameters:\n loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss_mode = loss_mode\n self.which_net = which_net\n self.which_D = which_D\n self.gpu = CUDA\n if loss_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif loss_mode in ['vanilla', 'ragan', 'rsgan']:\n self.loss = nn.BCEWithLogitsLoss()\n elif loss_mode in ['wgan', 'hinge']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % loss_mode\n )\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n if self.gpu:\n target_tensor = target_tensor.cuda()\n return target_tensor.expand_as(prediction)\n\n def G_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = 
self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = real_tensor if self.loss_mode in ['vanilla'\n ] else fake_tensor\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan']:\n loss_fake = self.loss(prediction_fake, real_tensor)\n loss_real = self.loss(prediction_real, fake_tensor)\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'vanilla':\n loss_fake = -self.loss(prediction_fake, fake_tensor)\n g_loss = loss_fake\n elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':\n loss_fake = -prediction_fake.mean()\n loss_real = prediction_real.mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'hinge' and self.which_D == 'Ra':\n loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 + prediction_real).mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'rsgan':\n loss_fake = self.loss(Dfake - Dreal, real_tensor)\n g_loss = loss_fake\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return g_loss\n\n def D_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = Dreal\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:\n 
loss_fake = self.loss(prediction_fake, fake_tensor)\n loss_real = self.loss(prediction_real, real_tensor)\n elif self.loss_mode == 'wgan':\n loss_fake = prediction_fake.mean()\n loss_real = -prediction_real.mean()\n elif self.loss_mode == 'hinge':\n loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 - prediction_real).mean()\n elif self.loss_mode == 'rsgan':\n loss_fake = 0.0\n loss_real = self.loss(Dreal - Dfake, real_tensor)\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return loss_fake + loss_real\n\n def __call__(self, Dreal, Dfake):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\"\"\"\n if self.which_net == 'G':\n return self.G_loss(Dreal, Dfake)\n elif self.which_net == 'D':\n return self.D_loss(Dreal, Dfake)\n else:\n raise NotImplementedError(\n 'which_net name [%s] is not recognized' % self.which_net)\n",
"step-3": "<mask token>\n\n\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN Discriminator's objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,\n target_fake_label=0.0, CUDA=False):\n \"\"\" Initialize the GAN's Discriminator Loss class.\n\n Parameters:\n loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss_mode = loss_mode\n self.which_net = which_net\n self.which_D = which_D\n self.gpu = CUDA\n if loss_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif loss_mode in ['vanilla', 'ragan', 'rsgan']:\n self.loss = nn.BCEWithLogitsLoss()\n elif loss_mode in ['wgan', 'hinge']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % loss_mode\n )\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n if self.gpu:\n target_tensor = target_tensor.cuda()\n return target_tensor.expand_as(prediction)\n\n def G_loss(self, Dreal, Dfake):\n if self.loss_mode != 
'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = real_tensor if self.loss_mode in ['vanilla'\n ] else fake_tensor\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan']:\n loss_fake = self.loss(prediction_fake, real_tensor)\n loss_real = self.loss(prediction_real, fake_tensor)\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'vanilla':\n loss_fake = -self.loss(prediction_fake, fake_tensor)\n g_loss = loss_fake\n elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':\n loss_fake = -prediction_fake.mean()\n loss_real = prediction_real.mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'hinge' and self.which_D == 'Ra':\n loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 + prediction_real).mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'rsgan':\n loss_fake = self.loss(Dfake - Dreal, real_tensor)\n g_loss = loss_fake\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return g_loss\n\n def D_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = Dreal\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - 
torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:\n loss_fake = self.loss(prediction_fake, fake_tensor)\n loss_real = self.loss(prediction_real, real_tensor)\n elif self.loss_mode == 'wgan':\n loss_fake = prediction_fake.mean()\n loss_real = -prediction_real.mean()\n elif self.loss_mode == 'hinge':\n loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 - prediction_real).mean()\n elif self.loss_mode == 'rsgan':\n loss_fake = 0.0\n loss_real = self.loss(Dreal - Dfake, real_tensor)\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return loss_fake + loss_real\n\n def __call__(self, Dreal, Dfake):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\"\"\"\n if self.which_net == 'G':\n return self.G_loss(Dreal, Dfake)\n elif self.which_net == 'D':\n return self.D_loss(Dreal, Dfake)\n else:\n raise NotImplementedError(\n 'which_net name [%s] is not recognized' % self.which_net)\n",
"step-4": "import torch\nimport torch.nn as nn\nimport config as cfg\n\n\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN Discriminator's objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0,\n target_fake_label=0.0, CUDA=False):\n \"\"\" Initialize the GAN's Discriminator Loss class.\n\n Parameters:\n loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss_mode = loss_mode\n self.which_net = which_net\n self.which_D = which_D\n self.gpu = CUDA\n if loss_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif loss_mode in ['vanilla', 'ragan', 'rsgan']:\n self.loss = nn.BCEWithLogitsLoss()\n elif loss_mode in ['wgan', 'hinge']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % loss_mode\n )\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n Returns:\n A label tensor filled with ground truth label, and with the size of the input\n \"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n if self.gpu:\n target_tensor = target_tensor.cuda()\n return target_tensor.expand_as(prediction)\n\n def 
G_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = real_tensor if self.loss_mode in ['vanilla'\n ] else fake_tensor\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan']:\n loss_fake = self.loss(prediction_fake, real_tensor)\n loss_real = self.loss(prediction_real, fake_tensor)\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'vanilla':\n loss_fake = -self.loss(prediction_fake, fake_tensor)\n g_loss = loss_fake\n elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':\n loss_fake = -prediction_fake.mean()\n loss_real = prediction_real.mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'hinge' and self.which_D == 'Ra':\n loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 + prediction_real).mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'rsgan':\n loss_fake = self.loss(Dfake - Dreal, real_tensor)\n g_loss = loss_fake\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return g_loss\n\n def D_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = Dreal\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - 
torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' %\n self.which_D)\n if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:\n loss_fake = self.loss(prediction_fake, fake_tensor)\n loss_real = self.loss(prediction_real, real_tensor)\n elif self.loss_mode == 'wgan':\n loss_fake = prediction_fake.mean()\n loss_real = -prediction_real.mean()\n elif self.loss_mode == 'hinge':\n loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 - prediction_real).mean()\n elif self.loss_mode == 'rsgan':\n loss_fake = 0.0\n loss_real = self.loss(Dreal - Dfake, real_tensor)\n else:\n raise NotImplementedError(\n 'loss_mode name [%s] is not recognized' % self.loss_mode)\n return loss_fake + loss_real\n\n def __call__(self, Dreal, Dfake):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\"\"\"\n if self.which_net == 'G':\n return self.G_loss(Dreal, Dfake)\n elif self.which_net == 'D':\n return self.D_loss(Dreal, Dfake)\n else:\n raise NotImplementedError(\n 'which_net name [%s] is not recognized' % self.which_net)\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Author : William\n# @Project : TextGAN-william\n# @FileName : gan_loss.py\n# @Time : Created at 2019-07-11\n# @Blog : http://zhiweil.ml/\n# @Description : \n# Copyrights (C) 2018. All Rights Reserved.\n\nimport torch\nimport torch.nn as nn\n\nimport config as cfg\n\n\nclass GANLoss(nn.Module):\n \"\"\"Define different GAN Discriminator's objectives.\n\n The GANLoss class abstracts away the need to create the target label tensor\n that has the same size as the input.\n \"\"\"\n\n def __init__(self, loss_mode, which_net, which_D, target_real_label=1.0, target_fake_label=0.0, CUDA=False):\n \"\"\" Initialize the GAN's Discriminator Loss class.\n\n Parameters:\n loss_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.\n target_real_label (bool) - - label for a real image\n target_fake_label (bool) - - label of a fake image\n\n Note: Do not use sigmoid as the last layer of Discriminator.\n LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.\n \"\"\"\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss_mode = loss_mode\n self.which_net = which_net\n self.which_D = which_D\n self.gpu = CUDA\n\n if loss_mode == 'lsgan':\n self.loss = nn.MSELoss()\n elif loss_mode in ['vanilla', 'ragan', 'rsgan']:\n self.loss = nn.BCEWithLogitsLoss()\n elif loss_mode in ['wgan', 'hinge']:\n self.loss = None\n else:\n raise NotImplementedError('gan mode %s not implemented' % loss_mode)\n\n def get_target_tensor(self, prediction, target_is_real):\n \"\"\"Create label tensors with the same size as the input.\n Parameters:\n prediction (tensor) - - tpyically the prediction from a discriminator\n target_is_real (bool) - - if the ground truth label is for real images or fake images\n Returns:\n A label tensor filled with ground truth label, and with the size of the 
input\n \"\"\"\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n if self.gpu:\n target_tensor = target_tensor.cuda()\n return target_tensor.expand_as(prediction)\n\n def G_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n\n real_tensor = self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = real_tensor if self.loss_mode in ['vanilla'] else fake_tensor\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)\n\n if self.loss_mode in ['lsgan', 'ragan']:\n loss_fake = self.loss(prediction_fake, real_tensor)\n loss_real = self.loss(prediction_real, fake_tensor)\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'vanilla':\n loss_fake = -self.loss(prediction_fake, fake_tensor)\n g_loss = loss_fake\n elif self.loss_mode in ['wgan', 'hinge'] and self.which_D == 'S':\n loss_fake = -prediction_fake.mean()\n loss_real = prediction_real.mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'hinge' and self.which_D == 'Ra':\n loss_fake = nn.ReLU()(1.0 - prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 + prediction_real).mean()\n g_loss = loss_fake + loss_real\n elif self.loss_mode == 'rsgan':\n loss_fake = self.loss(Dfake - Dreal, real_tensor)\n g_loss = loss_fake\n else:\n raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)\n\n return g_loss\n\n def D_loss(self, Dreal, Dfake):\n if self.loss_mode != 'rsgan' and cfg.d_out_mean:\n Dfake = torch.mean(Dfake.view(cfg.batch_size, -1), dim=-1)\n Dreal = torch.mean(Dreal.view(cfg.batch_size, -1), dim=-1)\n\n real_tensor = 
self.get_target_tensor(Dreal, True)\n fake_tensor = self.get_target_tensor(Dreal, False)\n\n if self.which_D == 'S':\n prediction_fake = Dfake\n prediction_real = Dreal\n elif self.which_D == 'Ra':\n prediction_fake = Dfake - torch.mean(Dreal)\n prediction_real = Dreal - torch.mean(Dfake)\n else:\n raise NotImplementedError('which_D name [%s] is not recognized' % self.which_D)\n\n if self.loss_mode in ['lsgan', 'ragan', 'vanilla']:\n loss_fake = self.loss(prediction_fake, fake_tensor)\n loss_real = self.loss(prediction_real, real_tensor)\n elif self.loss_mode == 'wgan':\n loss_fake = prediction_fake.mean()\n loss_real = -prediction_real.mean()\n elif self.loss_mode == 'hinge':\n loss_fake = nn.ReLU()(1.0 + prediction_fake).mean()\n loss_real = nn.ReLU()(1.0 - prediction_real).mean()\n elif self.loss_mode == 'rsgan':\n loss_fake = 0.\n loss_real = self.loss(Dreal - Dfake, real_tensor)\n else:\n raise NotImplementedError('loss_mode name [%s] is not recognized' % self.loss_mode)\n\n return loss_fake + loss_real\n\n def __call__(self, Dreal, Dfake):\n \"\"\"Calculate loss given Discriminator's output and grount truth labels.\"\"\"\n if self.which_net == 'G':\n return self.G_loss(Dreal, Dfake)\n elif self.which_net == 'D':\n return self.D_loss(Dreal, Dfake)\n else:\n raise NotImplementedError('which_net name [%s] is not recognized' % self.which_net)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config.set_file('config.yaml')
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = confuse.Configuration('SleepCycleWebhooks')
config.set_file('config.yaml')
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
<|reserved_special_token_1|>
from functools import reduce

import confuse

# Load the application's configuration once at import time.
config = confuse.Configuration('SleepCycleWebhooks')
config.set_file('config.yaml')


def get(path):
    """Return the configuration value at a dotted *path*, e.g. ``get('a.b.c')``."""
    keys = path.split('.')
    # Walk the confuse view hierarchy one key at a time, then resolve it.
    leaf_view = reduce(lambda node, key: node[key], keys, config)
    return leaf_view.get()
|
flexible
|
{
"blob_id": "16879598a8b1a0b23c5ea6de18f8fb0b0b77201c",
"index": 1360,
"step-1": "<mask token>\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n",
"step-2": "<mask token>\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n",
"step-3": "<mask token>\nconfig = confuse.Configuration('SleepCycleWebhooks')\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n",
"step-4": "from functools import reduce\nimport confuse\nconfig = confuse.Configuration('SleepCycleWebhooks')\nconfig.set_file('config.yaml')\n\n\ndef get(path):\n return reduce(lambda view, part: view[part], path.split('.'), config).get()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZooAnnouncer(ZooAnnouncerInterface):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZooAnnouncer(ZooAnnouncerInterface):
def updateZoo(self, annoucement):
print('ZooAnnouncer :' + annoucement)
<|reserved_special_token_1|>
import ZooAnnouncerInterface
class ZooAnnouncer(ZooAnnouncerInterface):
def updateZoo(self, annoucement):
print('ZooAnnouncer :' + annoucement)
<|reserved_special_token_1|>
import ZooAnnouncerInterface


# NOTE(review): the base class here is the imported *module* object; subclassing a
# module raises TypeError at class-creation time. Presumably the intent was
# `from ZooAnnouncerInterface import ZooAnnouncerInterface` — confirm against that module.
class ZooAnnouncer(ZooAnnouncerInterface):
    """Zoo observer that echoes announcements to stdout."""

    def updateZoo(self, annoucement):
        # Parameter name (including the 'annoucement' spelling) kept for interface parity.
        print("ZooAnnouncer :" + annoucement)
|
flexible
|
{
"blob_id": "be9c21ee04a612f711a1e6a82ea9478c77b62a82",
"index": 8112,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n\n def updateZoo(self, annoucement):\n print('ZooAnnouncer :' + annoucement)\n",
"step-4": "import ZooAnnouncerInterface\n\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n\n def updateZoo(self, annoucement):\n print('ZooAnnouncer :' + annoucement)\n",
"step-5": "import ZooAnnouncerInterface\n\nclass ZooAnnouncer(ZooAnnouncerInterface):\n def updateZoo(self,annoucement):\n print(\"ZooAnnouncer :\" + annoucement)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Demonstrate Python's bitwise AND, right-shift, XOR, and OR operators
# on two small integers, printing one result per line.
x = 25
y = 43
for result in (x & y, x >> y, x ^ y, x | y):
    print(result)
|
normal
|
{
"blob_id": "34d011727c93bb4c8ccf64017e7185717ef98667",
"index": 2603,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x & y)\nprint(x >> y)\nprint(x ^ y)\nprint(x | y)\n",
"step-3": "x = 25\ny = 43\nprint(x & y)\nprint(x >> y)\nprint(x ^ y)\nprint(x | y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('')
print('Lesson #2')
print('Program start:')
for i in a:
if i < 9:
print(i)
print('End')
<|reserved_special_token_1|>
a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]
print('')
print('Lesson #2')
print('Program start:')
for i in a:
if i < 9:
print(i)
print('End')
<|reserved_special_token_1|>
# Lesson #2: print every element of the list that is smaller than 9,
# framed by a small banner.
a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]
print("")
print("Lesson #2")
print("Program start:")
for small in (value for value in a if value < 9):
    print(small)
print("End")
|
flexible
|
{
"blob_id": "58f7810e2731721562e3459f92684589dc66862c",
"index": 881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('')\nprint('Lesson #2')\nprint('Program start:')\nfor i in a:\n if i < 9:\n print(i)\nprint('End')\n",
"step-3": "a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]\nprint('')\nprint('Lesson #2')\nprint('Program start:')\nfor i in a:\n if i < 9:\n print(i)\nprint('End')\n",
"step-4": "a = [3, 4, 2, 3, 5, 8, 23, 32, 35, 34, 4, 6, 9]\n\nprint(\"\")\nprint(\"Lesson #2\")\nprint(\"Program start:\")\nfor i in a:\n if i < 9:\n print(i)\nprint(\"End\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Variable basics: plain assignment, in-place increment, and tuple unpacking.
myfavoritenumber = 5
print(myfavoritenumber)
x = 5
x += 1
print(x)
x, y, z = 1, 2, 3
print(x, y, z)
|
normal
|
{
"blob_id": "e6c7b15e5b42cfe6c5dec2eaf397b67afd716ebd",
"index": 3858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(myfavoritenumber)\n<mask token>\nprint(x)\n<mask token>\nprint(x, y, z)\n",
"step-3": "myfavoritenumber = 5\nprint(myfavoritenumber)\nx = 5\nx = x + 1\nprint(x)\nx, y, z = 1, 2, 3\nprint(x, y, z)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
url = 'https://jsonplaceholder.typicode.com'
if len(sys.argv) > 1:
user_id = sys.argv[1]
name = requests.get('{}/users/{}'.format(url, user_id)).json().get(
'name')
r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()
tasks_completed = []
for task in r:
if task.get('completed') is True:
tasks_completed.append(task)
print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len
(tasks_completed), len(r)))
if len(tasks_completed) > 0:
for task in tasks_completed:
print('\t {}'.format(task.get('title')))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
import requests
import sys
if __name__ == '__main__':
url = 'https://jsonplaceholder.typicode.com'
if len(sys.argv) > 1:
user_id = sys.argv[1]
name = requests.get('{}/users/{}'.format(url, user_id)).json().get(
'name')
r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()
tasks_completed = []
for task in r:
if task.get('completed') is True:
tasks_completed.append(task)
print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len
(tasks_completed), len(r)))
if len(tasks_completed) > 0:
for task in tasks_completed:
print('\t {}'.format(task.get('title')))
<|reserved_special_token_1|>
#!/usr/bin/python3
"""Report an employee's completed TODO tasks from JSON Placeholder.

The employee id is read from sys.argv[1]; output lists completed task
titles under a summary header.
"""
import json
import requests
import sys


if __name__ == "__main__":
    base_url = "https://jsonplaceholder.typicode.com"
    if len(sys.argv) > 1:
        employee_id = sys.argv[1]
        # Two calls: one for the employee's name, one for their TODO list.
        name = requests.get("{}/users/{}".format(
            base_url, employee_id)).json().get("name")
        todos = requests.get("{}/todos?userId={}".format(
            base_url, employee_id)).json()
        done = [task for task in todos if task.get("completed") is True]
        print("Employee {} is done with tasks({:d}/{:d}):".format(
            name, len(done), len(todos)))
        if len(done) > 0:
            for task in done:
                print("\t {}".format(task.get("title")))
|
flexible
|
{
"blob_id": "e1a2b33a1ec7aca21a157895d8c7c5b5f29ff49c",
"index": 5047,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n url = 'https://jsonplaceholder.typicode.com'\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get('{}/users/{}'.format(url, user_id)).json().get(\n 'name')\n r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get('completed') is True:\n tasks_completed.append(task)\n print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len\n (tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print('\\t {}'.format(task.get('title')))\n",
"step-3": "<mask token>\nimport json\nimport requests\nimport sys\nif __name__ == '__main__':\n url = 'https://jsonplaceholder.typicode.com'\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get('{}/users/{}'.format(url, user_id)).json().get(\n 'name')\n r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get('completed') is True:\n tasks_completed.append(task)\n print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len\n (tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print('\\t {}'.format(task.get('title')))\n",
"step-4": "#!/usr/bin/python3\n\"\"\"\nRequests username and tasks from JSON Placeholder\nbased on userid (which is sys.argv[1])\n\"\"\"\nimport json\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n url = \"https://jsonplaceholder.typicode.com\"\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get(\"{}/users/{}\".format(\n url, user_id)).json().get(\"name\")\n r = requests.get(\"{}/todos?userId={}\".format(\n url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get(\"completed\") is True:\n tasks_completed.append(task)\n print(\"Employee {} is done with tasks({:d}/{:d}):\".format(\n name, len(tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print(\"\\t {}\".format(task.get(\"title\")))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
short_train <- read.csv('short_train.csv', header=TRUE)
#delete unnecessary columns
short_train[1] <- NULL
#remove ngrams containing @user_
regexp <- "@[a-zA-Z0-9_]*"
gsubtry <- gsub(pattern = regexp, replacement = "", x = short_train$Tweet)
#merge gsubtry back into short_train, rename as Tweet
short_train_clean <- cbind(short_train, gsubtry)
short_train_clean[2] <- NULL
names(short_train_clean)[3] <- "Tweet"
|
normal
|
{
"blob_id": "48a970b35aa7fd677828f5d7bd5f1dcf24511b01",
"index": 9098,
"step-1": "short_train <- read.csv('short_train.csv', header=TRUE)\n\n#delete unnecessary columns\nshort_train[1] <- NULL\n\n#remove ngrams containing @user_\nregexp <- \"@[a-zA-Z0-9_]*\"\ngsubtry <- gsub(pattern = regexp, replacement = \"\", x = short_train$Tweet)\n\n#merge gsubtry back into short_train, rename as Tweet\nshort_train_clean <- cbind(short_train, gsubtry)\nshort_train_clean[2] <- NULL\nnames(short_train_clean)[3] <- \"Tweet\"",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from oil_prices import *
with_without = 'without training'
show_plot = 'yes'
print('START')
# Defining the past and future sequences for the LSTM training
n_past = 8
n_future = 1
target_date = '2018-11-16'
past = ['t']+['t-'+str(i) for i in range(1,n_past)]
future = ['t+'+str(i) for i in range(1,n_future+1)]
# Importing and feature engineering data
print(' - Imports data and formats the data')
data = data_import()
df = data_imputing(data)
df_train, df_predict = train_predict_split(df, n_past, n_future)
scaler = data_scaler(df_train)
timeseries_to_supervised(df_train, n_past, n_future)
# Training the model anew if needed, otherwise, just loaded a pre-trained model
model_name = 'WTI_oil_price.mdl'
if with_without == 'with training':
print(' - Training the LSTM model')
model_trainer(df_train, n_past, n_future, model_name)
print(' - Loading the LSTM model')
model = tf.keras.models.load_model(model_name, custom_objects=None, compile=True)
# Validating the neural net by predicting all of the set and comparing with the observed data
df_train = make_many_predictions(df_train, model, past, n_future)
df_train = real_price_prediction(df_train, scaler)
# Predicting the oil price on Friday, November 16th, 2018.
prediction_run_forward(df_predict, target_date, scaler, model)
target_WTI_price = df_predict[df_predict['DATE'] == target_date]['WTI'].values[0]
print('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))
if show_plot == 'yes':
data_plot()
plot_real_prediction(df_train)
plot_prediction(df_predict, target_WTI_price, target_date)
print('END')
|
normal
|
{
"blob_id": "ec6067cc86b6ac702123d13911cc4ab97be6a857",
"index": 4077,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('START')\n<mask token>\nprint(' - Imports data and formats the data')\n<mask token>\ntimeseries_to_supervised(df_train, n_past, n_future)\n<mask token>\nif with_without == 'with training':\n print(' - Training the LSTM model')\n model_trainer(df_train, n_past, n_future, model_name)\nprint(' - Loading the LSTM model')\n<mask token>\nprediction_run_forward(df_predict, target_date, scaler, model)\n<mask token>\nprint('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))\nif show_plot == 'yes':\n data_plot()\n plot_real_prediction(df_train)\n plot_prediction(df_predict, target_WTI_price, target_date)\nprint('END')\n",
"step-3": "<mask token>\nwith_without = 'without training'\nshow_plot = 'yes'\nprint('START')\nn_past = 8\nn_future = 1\ntarget_date = '2018-11-16'\npast = ['t'] + [('t-' + str(i)) for i in range(1, n_past)]\nfuture = [('t+' + str(i)) for i in range(1, n_future + 1)]\nprint(' - Imports data and formats the data')\ndata = data_import()\ndf = data_imputing(data)\ndf_train, df_predict = train_predict_split(df, n_past, n_future)\nscaler = data_scaler(df_train)\ntimeseries_to_supervised(df_train, n_past, n_future)\nmodel_name = 'WTI_oil_price.mdl'\nif with_without == 'with training':\n print(' - Training the LSTM model')\n model_trainer(df_train, n_past, n_future, model_name)\nprint(' - Loading the LSTM model')\nmodel = tf.keras.models.load_model(model_name, custom_objects=None, compile\n =True)\ndf_train = make_many_predictions(df_train, model, past, n_future)\ndf_train = real_price_prediction(df_train, scaler)\nprediction_run_forward(df_predict, target_date, scaler, model)\ntarget_WTI_price = df_predict[df_predict['DATE'] == target_date]['WTI'].values[\n 0]\nprint('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))\nif show_plot == 'yes':\n data_plot()\n plot_real_prediction(df_train)\n plot_prediction(df_predict, target_WTI_price, target_date)\nprint('END')\n",
"step-4": "from oil_prices import *\nwith_without = 'without training'\nshow_plot = 'yes'\nprint('START')\nn_past = 8\nn_future = 1\ntarget_date = '2018-11-16'\npast = ['t'] + [('t-' + str(i)) for i in range(1, n_past)]\nfuture = [('t+' + str(i)) for i in range(1, n_future + 1)]\nprint(' - Imports data and formats the data')\ndata = data_import()\ndf = data_imputing(data)\ndf_train, df_predict = train_predict_split(df, n_past, n_future)\nscaler = data_scaler(df_train)\ntimeseries_to_supervised(df_train, n_past, n_future)\nmodel_name = 'WTI_oil_price.mdl'\nif with_without == 'with training':\n print(' - Training the LSTM model')\n model_trainer(df_train, n_past, n_future, model_name)\nprint(' - Loading the LSTM model')\nmodel = tf.keras.models.load_model(model_name, custom_objects=None, compile\n =True)\ndf_train = make_many_predictions(df_train, model, past, n_future)\ndf_train = real_price_prediction(df_train, scaler)\nprediction_run_forward(df_predict, target_date, scaler, model)\ntarget_WTI_price = df_predict[df_predict['DATE'] == target_date]['WTI'].values[\n 0]\nprint('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))\nif show_plot == 'yes':\n data_plot()\n plot_real_prediction(df_train)\n plot_prediction(df_predict, target_WTI_price, target_date)\nprint('END')\n",
"step-5": "from oil_prices import *\n\n\nwith_without = 'without training'\nshow_plot = 'yes'\n\nprint('START')\n\n# Defining the past and future sequences for the LSTM training\nn_past = 8\nn_future = 1\ntarget_date = '2018-11-16'\npast = ['t']+['t-'+str(i) for i in range(1,n_past)]\nfuture = ['t+'+str(i) for i in range(1,n_future+1)]\n\n# Importing and feature engineering data\nprint(' - Imports data and formats the data')\ndata = data_import()\ndf = data_imputing(data)\ndf_train, df_predict = train_predict_split(df, n_past, n_future)\nscaler = data_scaler(df_train)\ntimeseries_to_supervised(df_train, n_past, n_future)\n\n# Training the model anew if needed, otherwise, just loaded a pre-trained model\nmodel_name = 'WTI_oil_price.mdl'\nif with_without == 'with training':\n\tprint(' - Training the LSTM model')\n\tmodel_trainer(df_train, n_past, n_future, model_name)\nprint(' - Loading the LSTM model')\nmodel = tf.keras.models.load_model(model_name, custom_objects=None, compile=True)\n\n# Validating the neural net by predicting all of the set and comparing with the observed data\ndf_train = make_many_predictions(df_train, model, past, n_future)\ndf_train = real_price_prediction(df_train, scaler)\n\n\n# Predicting the oil price on Friday, November 16th, 2018.\nprediction_run_forward(df_predict, target_date, scaler, model)\ntarget_WTI_price = df_predict[df_predict['DATE'] == target_date]['WTI'].values[0]\nprint('Price of WTI oil on {}: $ {}'.format(target_date, target_WTI_price))\n\nif show_plot == 'yes':\n\tdata_plot()\n\tplot_real_prediction(df_train)\n\tplot_prediction(df_predict, target_WTI_price, target_date)\n\nprint('END')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -coding: UTF-8 -*-
# @Time : 2020/06/24 20:01
# @Author: Liangping_Chen
# @E-mail: chenliangping_2018@foxmail.com
import requests
def http_request(url,data,token=None,method='post'):
header = {'X-Lemonban-Media-Type': 'lemonban.v2',
'Authorization':token}
#判断是get请求还是post请求
if method=='get':
# 发起注册&登录
result = requests.get(url, json=data, headers=header)
else:
result = requests.post(url, json=data, headers=header)
return result.json()#return返回指定的结果
if __name__ == '__main__':
login_url='http://120.78.128.25:8766/futureloan/member/login'
login_data={'mobile_phone':13665929730,'pwd':'12345678'}
response=http_request(login_url,login_data)
print('登录的结果是:{}'.format(response))
#充值
token=response['data']['token_info']['token']
rec_url='http://120.78.128.25:8766/futureloan/member/recharge'
rec_data = {'member_id': 200170, 'amount': 123456}
print(http_request(rec_url,rec_data,"bearer "+token))
|
normal
|
{
"blob_id": "dd7c7fa6493a43988e1c8079797f6ff9b4d239dd",
"index": 4672,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n",
"step-4": "import requests\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n",
"step-5": "# -coding: UTF-8 -*-\n# @Time : 2020/06/24 20:01\n# @Author: Liangping_Chen\n# @E-mail: chenliangping_2018@foxmail.com\n\nimport requests\ndef http_request(url,data,token=None,method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2',\n 'Authorization':token}\n #判断是get请求还是post请求\n if method=='get':\n # 发起注册&登录\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n\n return result.json()#return返回指定的结果\nif __name__ == '__main__':\n\n login_url='http://120.78.128.25:8766/futureloan/member/login'\n login_data={'mobile_phone':13665929730,'pwd':'12345678'}\n response=http_request(login_url,login_data)\n print('登录的结果是:{}'.format(response))\n\n #充值\n token=response['data']['token_info']['token']\n rec_url='http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url,rec_data,\"bearer \"+token))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# pylint: disable=W0621,C0114,C0116,W0212,W0613
import io
import textwrap
from typing import cast, Any, Dict
import toml
import pytest
from dae.testing import convert_to_tab_separated
from dae.configuration.gpf_config_parser import GPFConfigParser
from dae.configuration.schemas.person_sets import person_set_collections_schema
from dae.pedigrees.loader import FamiliesLoader
from dae.person_sets import PersonSetCollection
from impala_storage.schema1.impala_variants import ImpalaVariants
@pytest.fixture
def families_fixture():
ped_content = io.StringIO(convert_to_tab_separated(
"""
familyId personId dadId momId sex status role
f1 mom1 0 0 2 1 mom
f1 dad1 0 0 1 1 dad
f1 prb1 dad1 mom1 1 2 prb
f1 sib1 dad1 mom1 2 2 sib
f1 sib2 dad1 mom1 2 2 sib
f2 grmom2 0 0 2 0 maternal_grandmother
f2 grdad2 0 0 1 0 maternal_grandfather
f2 mom2 grdad2 grmom2 2 1 mom
f2 dad2 0 0 1 1 dad
f2 prb2 dad2 mom2 1 2 prb
f2 sib2_3 dad2 mom2 2 2 sib
"""))
families = FamiliesLoader(ped_content).load()
assert families is not None
return families
def get_person_set_collections_config(content: str):
return GPFConfigParser.process_config(
cast(Dict[str, Any], toml.loads(content)),
{"person_set_collections": person_set_collections_schema},
).person_set_collections
@pytest.fixture
def status_collection(families_fixture):
content = textwrap.dedent(
"""
[person_set_collections]
selected_person_set_collections = ["status"]
status.id = "status"
status.name = "Affected Status"
status.sources = [{ from = "pedigree", source = "status" }]
status.domain = [
{
id = "affected",
name = "Affected",
values = ["affected"],
color = "#aabbcc"
},
{
id = "unaffected",
name = "Unaffected",
values = ["unaffected"],
color = "#ffffff"
},
]
status.default = {id = "unknown",name = "Unknown",color = "#aaaaaa"}
""")
config = get_person_set_collections_config(content)
collection = PersonSetCollection.from_families(
config.status, families_fixture)
return collection
def test_status_person_set_collection(status_collection):
assert status_collection is not None
psc = status_collection
assert len(psc.person_sets) == 3
assert len(psc.person_sets["unknown"].persons) == 2
assert len(psc.person_sets["affected"].persons) == 5
assert len(psc.person_sets["unaffected"].persons) == 4
def test_status_person_set_collection_all_selected(
status_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_collection,
("status", {"affected", "unaffected", "unknown"})
)
assert query == ()
def test_status_person_set_collection_some_selected_no_default(
status_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_collection,
("status", {"affected"})
)
assert query == ([{"status": "affected"}], [])
def test_status_person_set_collection_some_selected_and_default(
status_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_collection,
("status", {"affected", "unknown"})
)
assert query == ([], [{"status": "unaffected"}])
@pytest.fixture
def status_sex_collection(families_fixture):
config = get_person_set_collections_config(textwrap.dedent("""
[person_set_collections]
selected_person_set_collections = ["status_sex"]
status_sex.id = "status_sex"
status_sex.name = "Affected Status and Sex"
status_sex.sources = [
{ from = "pedigree", source = "status" },
{ from = "pedigree", source = "sex" },
]
status_sex.domain = [
{ id = "affected_male", name = "Affected Male",
values = ["affected", "M"], color = "#ffffff" },
{ id = "affected_female", name = "Affected Female",
values = ["affected", "F"], color = "#ffffff" },
{ id = "unaffected_male", name = "Unaffected Male",
values = ["unaffected", "M"], color = "#ffffff" },
{ id = "unaffected_female", name = "Unaffected Female",
values = ["unaffected", "F"], color = "#ffffff" },
]
status_sex.default = { id="other", name="Other", color="#aaaaaa"}
"""))
return PersonSetCollection.from_families(
config.status_sex, families_fixture
)
def test_status_sex_person_set_collection_all_selected(
status_sex_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"affected_male", "affected_female",
"unaffected_male", "unaffected_female",
"other"})
)
assert query == ()
def test_status_sex_person_set_collection_some_selected_no_default(
status_sex_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"affected_male", "affected_female"})
)
assert query == (
[
{"sex": "F", "status": "affected"},
{"sex": "M", "status": "affected"},
], [])
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"unaffected_male", "unaffected_female"})
)
assert query == (
[
{"sex": "F", "status": "unaffected"},
{"sex": "M", "status": "unaffected"}
], [])
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"affected_male", "unaffected_female"})
)
assert query == ([
{"sex": "M", "status": "affected"},
{"sex": "F", "status": "unaffected"},
], [])
def test_status_sex_person_set_collection_some_selected_with_default(
status_sex_collection):
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"affected_male", "affected_female", "other"})
)
assert query == ([], [
{"sex": "F", "status": "unaffected"},
{"sex": "M", "status": "unaffected"},
])
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"unaffected_male", "unaffected_female", "other"}))
assert query == ([], [
{"sex": "F", "status": "affected"},
{"sex": "M", "status": "affected"},
])
query = ImpalaVariants.build_person_set_collection_query(
status_sex_collection,
("status_sex", {
"affected_male", "unaffected_female", "other"})
)
assert query == ([], [
{"sex": "F", "status": "affected"},
{"sex": "M", "status": "unaffected"},
])
|
normal
|
{
"blob_id": "6c8f690e1b43d459535238e24cccc8aa118e2d57",
"index": 3038,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef families_fixture():\n ped_content = io.StringIO(convert_to_tab_separated(\n \"\"\"\n familyId personId dadId\t momId\tsex status role\n f1 mom1 0 0 2 1 mom\n f1 dad1 0 0 1 1 dad\n f1 prb1 dad1 mom1 1 2 prb\n f1 sib1 dad1 mom1 2 2 sib\n f1 sib2 dad1 mom1 2 2 sib\n f2 grmom2 0 0 2 0 maternal_grandmother\n f2 grdad2 0 0 1 0 maternal_grandfather\n f2 mom2 grdad2 grmom2 2 1 mom\n f2 dad2 0 0 1 1 dad\n f2 prb2 dad2 mom2 1 2 prb\n f2 sib2_3 dad2 mom2 2 2 sib\n \"\"\"\n ))\n families = FamiliesLoader(ped_content).load()\n assert families is not None\n return families\n\n\ndef get_person_set_collections_config(content: str):\n return GPFConfigParser.process_config(cast(Dict[str, Any], toml.loads(\n content)), {'person_set_collections': person_set_collections_schema}\n ).person_set_collections\n\n\n<mask token>\n\n\ndef test_status_person_set_collection(status_collection):\n assert status_collection is not None\n psc = status_collection\n assert len(psc.person_sets) == 3\n assert len(psc.person_sets['unknown'].persons) == 2\n assert len(psc.person_sets['affected'].persons) == 5\n assert len(psc.person_sets['unaffected'].persons) == 4\n\n\ndef test_status_person_set_collection_all_selected(status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unaffected', 'unknown'}))\n assert query == ()\n\n\n<mask token>\n\n\ndef test_status_person_set_collection_some_selected_and_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unknown'}))\n assert query == ([], [{'status': 'unaffected'}])\n\n\n@pytest.fixture\ndef status_sex_collection(families_fixture):\n config = get_person_set_collections_config(textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status_sex\"]\n\n status_sex.id = \"status_sex\"\n status_sex.name = \"Affected Status and 
Sex\"\n status_sex.sources = [\n { from = \"pedigree\", source = \"status\" },\n { from = \"pedigree\", source = \"sex\" },\n ]\n status_sex.domain = [\n { id = \"affected_male\", name = \"Affected Male\",\n values = [\"affected\", \"M\"], color = \"#ffffff\" },\n { id = \"affected_female\", name = \"Affected Female\",\n values = [\"affected\", \"F\"], color = \"#ffffff\" },\n { id = \"unaffected_male\", name = \"Unaffected Male\",\n values = [\"unaffected\", \"M\"], color = \"#ffffff\" },\n { id = \"unaffected_female\", name = \"Unaffected Female\",\n values = [\"unaffected\", \"F\"], color = \"#ffffff\" },\n ]\n status_sex.default = { id=\"other\", name=\"Other\", color=\"#aaaaaa\"}\n \"\"\"\n ))\n return PersonSetCollection.from_families(config.status_sex,\n families_fixture)\n\n\n<mask token>\n\n\ndef test_status_sex_person_set_collection_some_selected_with_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef families_fixture():\n ped_content = io.StringIO(convert_to_tab_separated(\n \"\"\"\n familyId personId dadId\t momId\tsex status role\n f1 mom1 0 0 2 1 mom\n f1 dad1 0 0 1 1 dad\n f1 prb1 dad1 mom1 1 2 prb\n f1 sib1 dad1 mom1 2 2 sib\n f1 sib2 dad1 mom1 2 2 sib\n f2 grmom2 0 0 2 0 maternal_grandmother\n f2 grdad2 0 0 1 0 maternal_grandfather\n f2 mom2 grdad2 grmom2 2 1 mom\n f2 dad2 0 0 1 1 dad\n f2 prb2 dad2 mom2 1 2 prb\n f2 sib2_3 dad2 mom2 2 2 sib\n \"\"\"\n ))\n families = FamiliesLoader(ped_content).load()\n assert families is not None\n return families\n\n\ndef get_person_set_collections_config(content: str):\n return GPFConfigParser.process_config(cast(Dict[str, Any], toml.loads(\n content)), {'person_set_collections': person_set_collections_schema}\n ).person_set_collections\n\n\n<mask token>\n\n\ndef test_status_person_set_collection(status_collection):\n assert status_collection is not None\n psc = status_collection\n assert len(psc.person_sets) == 3\n assert len(psc.person_sets['unknown'].persons) == 2\n assert len(psc.person_sets['affected'].persons) == 5\n assert len(psc.person_sets['unaffected'].persons) == 4\n\n\ndef test_status_person_set_collection_all_selected(status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unaffected', 'unknown'}))\n assert query == ()\n\n\n<mask token>\n\n\ndef test_status_person_set_collection_some_selected_and_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unknown'}))\n assert query == ([], [{'status': 'unaffected'}])\n\n\n@pytest.fixture\ndef status_sex_collection(families_fixture):\n config = get_person_set_collections_config(textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status_sex\"]\n\n status_sex.id = \"status_sex\"\n status_sex.name = \"Affected Status and 
Sex\"\n status_sex.sources = [\n { from = \"pedigree\", source = \"status\" },\n { from = \"pedigree\", source = \"sex\" },\n ]\n status_sex.domain = [\n { id = \"affected_male\", name = \"Affected Male\",\n values = [\"affected\", \"M\"], color = \"#ffffff\" },\n { id = \"affected_female\", name = \"Affected Female\",\n values = [\"affected\", \"F\"], color = \"#ffffff\" },\n { id = \"unaffected_male\", name = \"Unaffected Male\",\n values = [\"unaffected\", \"M\"], color = \"#ffffff\" },\n { id = \"unaffected_female\", name = \"Unaffected Female\",\n values = [\"unaffected\", \"F\"], color = \"#ffffff\" },\n ]\n status_sex.default = { id=\"other\", name=\"Other\", color=\"#aaaaaa\"}\n \"\"\"\n ))\n return PersonSetCollection.from_families(config.status_sex,\n families_fixture)\n\n\n<mask token>\n\n\ndef test_status_sex_person_set_collection_some_selected_no_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female'}))\n assert query == ([{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'M', 'status': 'affected'}, {'sex': 'F',\n 'status': 'unaffected'}], [])\n\n\ndef test_status_sex_person_set_collection_some_selected_with_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n 
query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef families_fixture():\n ped_content = io.StringIO(convert_to_tab_separated(\n \"\"\"\n familyId personId dadId\t momId\tsex status role\n f1 mom1 0 0 2 1 mom\n f1 dad1 0 0 1 1 dad\n f1 prb1 dad1 mom1 1 2 prb\n f1 sib1 dad1 mom1 2 2 sib\n f1 sib2 dad1 mom1 2 2 sib\n f2 grmom2 0 0 2 0 maternal_grandmother\n f2 grdad2 0 0 1 0 maternal_grandfather\n f2 mom2 grdad2 grmom2 2 1 mom\n f2 dad2 0 0 1 1 dad\n f2 prb2 dad2 mom2 1 2 prb\n f2 sib2_3 dad2 mom2 2 2 sib\n \"\"\"\n ))\n families = FamiliesLoader(ped_content).load()\n assert families is not None\n return families\n\n\ndef get_person_set_collections_config(content: str):\n return GPFConfigParser.process_config(cast(Dict[str, Any], toml.loads(\n content)), {'person_set_collections': person_set_collections_schema}\n ).person_set_collections\n\n\n@pytest.fixture\ndef status_collection(families_fixture):\n content = textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status\"]\n status.id = \"status\"\n status.name = \"Affected Status\"\n status.sources = [{ from = \"pedigree\", source = \"status\" }]\n status.domain = [\n {\n id = \"affected\",\n name = \"Affected\",\n values = [\"affected\"],\n color = \"#aabbcc\"\n },\n {\n id = \"unaffected\",\n name = \"Unaffected\",\n values = [\"unaffected\"],\n color = \"#ffffff\"\n },\n ]\n status.default = {id = \"unknown\",name = \"Unknown\",color = \"#aaaaaa\"}\n\n \"\"\"\n )\n config = get_person_set_collections_config(content)\n collection = PersonSetCollection.from_families(config.status,\n families_fixture)\n return collection\n\n\ndef test_status_person_set_collection(status_collection):\n assert status_collection is not None\n psc = status_collection\n assert len(psc.person_sets) == 3\n assert len(psc.person_sets['unknown'].persons) == 2\n assert len(psc.person_sets['affected'].persons) == 5\n assert len(psc.person_sets['unaffected'].persons) == 4\n\n\ndef 
test_status_person_set_collection_all_selected(status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unaffected', 'unknown'}))\n assert query == ()\n\n\ndef test_status_person_set_collection_some_selected_no_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected'}))\n assert query == ([{'status': 'affected'}], [])\n\n\ndef test_status_person_set_collection_some_selected_and_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unknown'}))\n assert query == ([], [{'status': 'unaffected'}])\n\n\n@pytest.fixture\ndef status_sex_collection(families_fixture):\n config = get_person_set_collections_config(textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status_sex\"]\n\n status_sex.id = \"status_sex\"\n status_sex.name = \"Affected Status and Sex\"\n status_sex.sources = [\n { from = \"pedigree\", source = \"status\" },\n { from = \"pedigree\", source = \"sex\" },\n ]\n status_sex.domain = [\n { id = \"affected_male\", name = \"Affected Male\",\n values = [\"affected\", \"M\"], color = \"#ffffff\" },\n { id = \"affected_female\", name = \"Affected Female\",\n values = [\"affected\", \"F\"], color = \"#ffffff\" },\n { id = \"unaffected_male\", name = \"Unaffected Male\",\n values = [\"unaffected\", \"M\"], color = \"#ffffff\" },\n { id = \"unaffected_female\", name = \"Unaffected Female\",\n values = [\"unaffected\", \"F\"], color = \"#ffffff\" },\n ]\n status_sex.default = { id=\"other\", name=\"Other\", color=\"#aaaaaa\"}\n \"\"\"\n ))\n return PersonSetCollection.from_families(config.status_sex,\n families_fixture)\n\n\n<mask token>\n\n\ndef test_status_sex_person_set_collection_some_selected_no_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n 
status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female'}))\n assert query == ([{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'M', 'status': 'affected'}, {'sex': 'F',\n 'status': 'unaffected'}], [])\n\n\ndef test_status_sex_person_set_collection_some_selected_with_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n",
"step-4": "<mask token>\n\n\n@pytest.fixture\ndef families_fixture():\n ped_content = io.StringIO(convert_to_tab_separated(\n \"\"\"\n familyId personId dadId\t momId\tsex status role\n f1 mom1 0 0 2 1 mom\n f1 dad1 0 0 1 1 dad\n f1 prb1 dad1 mom1 1 2 prb\n f1 sib1 dad1 mom1 2 2 sib\n f1 sib2 dad1 mom1 2 2 sib\n f2 grmom2 0 0 2 0 maternal_grandmother\n f2 grdad2 0 0 1 0 maternal_grandfather\n f2 mom2 grdad2 grmom2 2 1 mom\n f2 dad2 0 0 1 1 dad\n f2 prb2 dad2 mom2 1 2 prb\n f2 sib2_3 dad2 mom2 2 2 sib\n \"\"\"\n ))\n families = FamiliesLoader(ped_content).load()\n assert families is not None\n return families\n\n\ndef get_person_set_collections_config(content: str):\n return GPFConfigParser.process_config(cast(Dict[str, Any], toml.loads(\n content)), {'person_set_collections': person_set_collections_schema}\n ).person_set_collections\n\n\n@pytest.fixture\ndef status_collection(families_fixture):\n content = textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status\"]\n status.id = \"status\"\n status.name = \"Affected Status\"\n status.sources = [{ from = \"pedigree\", source = \"status\" }]\n status.domain = [\n {\n id = \"affected\",\n name = \"Affected\",\n values = [\"affected\"],\n color = \"#aabbcc\"\n },\n {\n id = \"unaffected\",\n name = \"Unaffected\",\n values = [\"unaffected\"],\n color = \"#ffffff\"\n },\n ]\n status.default = {id = \"unknown\",name = \"Unknown\",color = \"#aaaaaa\"}\n\n \"\"\"\n )\n config = get_person_set_collections_config(content)\n collection = PersonSetCollection.from_families(config.status,\n families_fixture)\n return collection\n\n\ndef test_status_person_set_collection(status_collection):\n assert status_collection is not None\n psc = status_collection\n assert len(psc.person_sets) == 3\n assert len(psc.person_sets['unknown'].persons) == 2\n assert len(psc.person_sets['affected'].persons) == 5\n assert len(psc.person_sets['unaffected'].persons) == 4\n\n\ndef 
test_status_person_set_collection_all_selected(status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unaffected', 'unknown'}))\n assert query == ()\n\n\ndef test_status_person_set_collection_some_selected_no_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected'}))\n assert query == ([{'status': 'affected'}], [])\n\n\ndef test_status_person_set_collection_some_selected_and_default(\n status_collection):\n query = ImpalaVariants.build_person_set_collection_query(status_collection,\n ('status', {'affected', 'unknown'}))\n assert query == ([], [{'status': 'unaffected'}])\n\n\n@pytest.fixture\ndef status_sex_collection(families_fixture):\n config = get_person_set_collections_config(textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status_sex\"]\n\n status_sex.id = \"status_sex\"\n status_sex.name = \"Affected Status and Sex\"\n status_sex.sources = [\n { from = \"pedigree\", source = \"status\" },\n { from = \"pedigree\", source = \"sex\" },\n ]\n status_sex.domain = [\n { id = \"affected_male\", name = \"Affected Male\",\n values = [\"affected\", \"M\"], color = \"#ffffff\" },\n { id = \"affected_female\", name = \"Affected Female\",\n values = [\"affected\", \"F\"], color = \"#ffffff\" },\n { id = \"unaffected_male\", name = \"Unaffected Male\",\n values = [\"unaffected\", \"M\"], color = \"#ffffff\" },\n { id = \"unaffected_female\", name = \"Unaffected Female\",\n values = [\"unaffected\", \"F\"], color = \"#ffffff\" },\n ]\n status_sex.default = { id=\"other\", name=\"Other\", color=\"#aaaaaa\"}\n \"\"\"\n ))\n return PersonSetCollection.from_families(config.status_sex,\n families_fixture)\n\n\ndef test_status_sex_person_set_collection_all_selected(status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', 
{'affected_male',\n 'affected_female', 'unaffected_male', 'unaffected_female', 'other'}))\n assert query == ()\n\n\ndef test_status_sex_person_set_collection_some_selected_no_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female'}))\n assert query == ([{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}], [])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female'}))\n assert query == ([{'sex': 'M', 'status': 'affected'}, {'sex': 'F',\n 'status': 'unaffected'}], [])\n\n\ndef test_status_sex_person_set_collection_some_selected_with_default(\n status_sex_collection):\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'affected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'unaffected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'unaffected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'affected'}])\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection, ('status_sex', {'affected_male',\n 'unaffected_female', 'other'}))\n assert query == ([], [{'sex': 'F', 'status': 'affected'}, {'sex': 'M',\n 'status': 'unaffected'}])\n",
"step-5": "# pylint: disable=W0621,C0114,C0116,W0212,W0613\nimport io\nimport textwrap\nfrom typing import cast, Any, Dict\n\nimport toml\nimport pytest\n\nfrom dae.testing import convert_to_tab_separated\nfrom dae.configuration.gpf_config_parser import GPFConfigParser\nfrom dae.configuration.schemas.person_sets import person_set_collections_schema\nfrom dae.pedigrees.loader import FamiliesLoader\nfrom dae.person_sets import PersonSetCollection\n\nfrom impala_storage.schema1.impala_variants import ImpalaVariants\n\n\n@pytest.fixture\ndef families_fixture():\n ped_content = io.StringIO(convert_to_tab_separated(\n \"\"\"\n familyId personId dadId\t momId\tsex status role\n f1 mom1 0 0 2 1 mom\n f1 dad1 0 0 1 1 dad\n f1 prb1 dad1 mom1 1 2 prb\n f1 sib1 dad1 mom1 2 2 sib\n f1 sib2 dad1 mom1 2 2 sib\n f2 grmom2 0 0 2 0 maternal_grandmother\n f2 grdad2 0 0 1 0 maternal_grandfather\n f2 mom2 grdad2 grmom2 2 1 mom\n f2 dad2 0 0 1 1 dad\n f2 prb2 dad2 mom2 1 2 prb\n f2 sib2_3 dad2 mom2 2 2 sib\n \"\"\"))\n families = FamiliesLoader(ped_content).load()\n assert families is not None\n return families\n\n\ndef get_person_set_collections_config(content: str):\n return GPFConfigParser.process_config(\n cast(Dict[str, Any], toml.loads(content)),\n {\"person_set_collections\": person_set_collections_schema},\n ).person_set_collections\n\n\n@pytest.fixture\ndef status_collection(families_fixture):\n content = textwrap.dedent(\n \"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status\"]\n status.id = \"status\"\n status.name = \"Affected Status\"\n status.sources = [{ from = \"pedigree\", source = \"status\" }]\n status.domain = [\n {\n id = \"affected\",\n name = \"Affected\",\n values = [\"affected\"],\n color = \"#aabbcc\"\n },\n {\n id = \"unaffected\",\n name = \"Unaffected\",\n values = [\"unaffected\"],\n color = \"#ffffff\"\n },\n ]\n status.default = {id = \"unknown\",name = \"Unknown\",color = \"#aaaaaa\"}\n\n \"\"\")\n\n config = 
get_person_set_collections_config(content)\n\n collection = PersonSetCollection.from_families(\n config.status, families_fixture)\n return collection\n\n\ndef test_status_person_set_collection(status_collection):\n assert status_collection is not None\n psc = status_collection\n\n assert len(psc.person_sets) == 3\n assert len(psc.person_sets[\"unknown\"].persons) == 2\n assert len(psc.person_sets[\"affected\"].persons) == 5\n assert len(psc.person_sets[\"unaffected\"].persons) == 4\n\n\ndef test_status_person_set_collection_all_selected(\n status_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_collection,\n (\"status\", {\"affected\", \"unaffected\", \"unknown\"})\n )\n\n assert query == ()\n\n\ndef test_status_person_set_collection_some_selected_no_default(\n status_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_collection,\n (\"status\", {\"affected\"})\n )\n\n assert query == ([{\"status\": \"affected\"}], [])\n\n\ndef test_status_person_set_collection_some_selected_and_default(\n status_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_collection,\n (\"status\", {\"affected\", \"unknown\"})\n )\n\n assert query == ([], [{\"status\": \"unaffected\"}])\n\n\n@pytest.fixture\ndef status_sex_collection(families_fixture):\n config = get_person_set_collections_config(textwrap.dedent(\"\"\"\n [person_set_collections]\n selected_person_set_collections = [\"status_sex\"]\n\n status_sex.id = \"status_sex\"\n status_sex.name = \"Affected Status and Sex\"\n status_sex.sources = [\n { from = \"pedigree\", source = \"status\" },\n { from = \"pedigree\", source = \"sex\" },\n ]\n status_sex.domain = [\n { id = \"affected_male\", name = \"Affected Male\",\n values = [\"affected\", \"M\"], color = \"#ffffff\" },\n { id = \"affected_female\", name = \"Affected Female\",\n values = [\"affected\", \"F\"], color = \"#ffffff\" },\n { id = \"unaffected_male\", name = \"Unaffected 
Male\",\n values = [\"unaffected\", \"M\"], color = \"#ffffff\" },\n { id = \"unaffected_female\", name = \"Unaffected Female\",\n values = [\"unaffected\", \"F\"], color = \"#ffffff\" },\n ]\n status_sex.default = { id=\"other\", name=\"Other\", color=\"#aaaaaa\"}\n \"\"\"))\n\n return PersonSetCollection.from_families(\n config.status_sex, families_fixture\n )\n\n\ndef test_status_sex_person_set_collection_all_selected(\n status_sex_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"affected_male\", \"affected_female\",\n \"unaffected_male\", \"unaffected_female\",\n \"other\"})\n )\n\n assert query == ()\n\n\ndef test_status_sex_person_set_collection_some_selected_no_default(\n status_sex_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"affected_male\", \"affected_female\"})\n )\n\n assert query == (\n [\n {\"sex\": \"F\", \"status\": \"affected\"},\n {\"sex\": \"M\", \"status\": \"affected\"},\n ], [])\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"unaffected_male\", \"unaffected_female\"})\n )\n\n assert query == (\n [\n {\"sex\": \"F\", \"status\": \"unaffected\"},\n {\"sex\": \"M\", \"status\": \"unaffected\"}\n ], [])\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"affected_male\", \"unaffected_female\"})\n )\n\n assert query == ([\n {\"sex\": \"M\", \"status\": \"affected\"},\n {\"sex\": \"F\", \"status\": \"unaffected\"},\n ], [])\n\n\ndef test_status_sex_person_set_collection_some_selected_with_default(\n status_sex_collection):\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"affected_male\", \"affected_female\", \"other\"})\n )\n\n assert query == ([], [\n {\"sex\": \"F\", \"status\": \"unaffected\"},\n {\"sex\": 
\"M\", \"status\": \"unaffected\"},\n ])\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"unaffected_male\", \"unaffected_female\", \"other\"}))\n\n assert query == ([], [\n {\"sex\": \"F\", \"status\": \"affected\"},\n {\"sex\": \"M\", \"status\": \"affected\"},\n ])\n\n query = ImpalaVariants.build_person_set_collection_query(\n status_sex_collection,\n (\"status_sex\", {\n \"affected_male\", \"unaffected_female\", \"other\"})\n )\n\n assert query == ([], [\n {\"sex\": \"F\", \"status\": \"affected\"},\n {\"sex\": \"M\", \"status\": \"unaffected\"},\n ])\n",
"step-ids": [
7,
8,
10,
11,
13
]
}
|
[
7,
8,
10,
11,
13
] |
# Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.
# Main algos UPGMA and CLUSTALW
from dataclasses import dataclass
import FormattingET
class Node:
    """A node in a binary phylogenetic tree.

    Attributes:
        child1, child2: child Nodes; both None for a leaf.
        age: estimated age of the ancestor (half the UPGMA cluster distance).
        num: index of this node within the tree list (set by initializeTree).
        label: species name for leaves, generated name for internal nodes.
        alignment: list of (gapped) DNA strings aligned at this node.
    """

    # The original @dataclass added nothing useful here: the hand-written
    # __init__ suppressed the generated one, and the generated __repr__/__eq__
    # referenced age/num/label/alignment, which were unset on a fresh Node and
    # would raise AttributeError. A plain class with explicit defaults keeps
    # the same constructor interface and makes every attribute safe to read.
    def __init__(self, child1=None, child2=None):
        self.child1 = child1
        self.child2 = child2
        self.age = 0.0        # leaves have age 0 in UPGMA
        self.num = -1         # overwritten by initializeTree
        self.label = ""       # overwritten by initializeTree
        self.alignment = []   # per-instance list (never shared between nodes)
#UPGMA algos
def initializeMatrix(m, n):
    """Return an m-by-n matrix of zeros, stored as a list of row lists."""
    return [[0] * n for _ in range(m)]
def initializeClusters(t):
    """Return the leaf nodes of tree t as the starting singleton clusters.

    A binary tree stored as a flat list of 2n-1 nodes keeps its n leaves
    in the first n slots, so the leading half (rounded up) is copied out.
    """
    leafCount = int((len(t) + 1) / 2)
    return [t[i] for i in range(leafCount)]
def initializeTree(speciesNames):
    """Build the flat node list for a tree with one leaf per species.

    Produces 2n-1 Nodes: slots 0..n-1 are leaves labelled with the species
    names; the remaining slots are pre-labelled ancestor placeholders that
    upgma() later wires to their children.
    """
    numLeaves = len(speciesNames)
    nodes = []
    for idx in range(2 * numLeaves - 1):
        node = Node()
        node.num = idx
        if idx < numLeaves:
            node.label = speciesNames[idx]
        else:
            node.label = "Ancestor species" + str(idx)
        nodes.append(node)
    return nodes
def countLeaves(v: Node):
    """Count the leaves in the subtree rooted at v.

    A node missing either child counts as a leaf (upgma builds strictly
    binary trees, so leaves have both children unset).
    """
    pending = [v]
    total = 0
    while pending:
        node = pending.pop()
        if node.child1 is None or node.child2 is None:
            total += 1
        else:
            pending.append(node.child1)
            pending.append(node.child2)
    return total
def delClusters(clusters, row, col):
    """Remove the two just-merged clusters from the list in place.

    Deletes the larger index (col) first so the smaller one stays valid;
    findMinElement guarantees col > row.
    """
    for idx in (col, row):
        del clusters[idx]
    return clusters
def findMinElement(mtx):
    """Locate the smallest entry in the strict upper triangle of mtx.

    Returns (row, col, value) with row < col; ties keep the first
    occurrence in row-major scan order.
    """
    best = (0, 1, mtx[0][1])
    size = len(mtx)
    for r in range(size):
        for c in range(r + 1, size):
            if mtx[r][c] < best[2]:
                best = (r, c, mtx[r][c])
    return best
def delRowCol(mtx, row, col):
    """Drop rows and columns `row` and `col` from the square matrix in place.

    The larger index (col) is removed first in each dimension so the
    smaller index remains valid after the shift.
    """
    del mtx[col]
    del mtx[row]
    for r in mtx:
        del r[col]
        del r[row]
    return mtx
def addRowCol(mtx, clusters, row, col):
    """Grow mtx with the distances for the cluster merging row and col.

    Each new distance is the size-weighted (UPGMA) average of the two old
    clusters' distances to that entry; the new diagonal entry stays 0.
    Mutates and returns mtx; must be called before the old row/col are
    deleted.
    """
    size1 = countLeaves(clusters[row])
    size2 = countLeaves(clusters[col])
    newRow = [0] * (len(mtx) + 1)
    for k in range(len(mtx)):
        if k == row or k == col:
            continue
        newRow[k] = (size1 * mtx[row][k] + size2 * mtx[k][col]) / (size1 + size2)
    mtx.append(newRow)
    for k in range(len(mtx) - 1):
        mtx[k].append(newRow[k])
    return mtx
def upgma(mtx, speciesNames):
    """Build a phylogenetic tree from a distance matrix via UPGMA.

    Repeatedly merges the two closest clusters, creating an ancestor node
    whose age is half the cluster distance, until a single cluster remains.
    Returns the full node list; the root is the last element.

    Note: mtx is consumed (mutated) by the merge steps.
    """
    tree = initializeTree(speciesNames)
    clusters = initializeClusters(tree)
    numLeaves = len(mtx)
    for i in range(numLeaves, 2 * numLeaves - 1):
        # Tuple unpacking replaces index access and avoids the original
        # local `min`, which shadowed the builtin min().
        row, col, minDist = findMinElement(mtx)
        tree[i].age = minDist / 2
        tree[i].child1 = clusters[row]
        tree[i].child2 = clusters[col]
        # Order matters: the merged row/column must be computed from the
        # old matrix and cluster list before either is shrunk.
        mtx = addRowCol(mtx, clusters, row, col)
        clusters.append(tree[i])
        mtx = delRowCol(mtx, row, col)
        clusters = delClusters(clusters, row, col)
    return tree
#CLUSTALW algos
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
    """Sum-of-pairs score for column idx1 of align1 against column idx2 of align2.

    Every character pair across the two columns contributes: +match for
    identical symbols, -mismatch for two differing non-gap symbols,
    -gap when exactly one side is '-', and 0 when both are gaps.
    """
    col1 = [rowStr[idx1] for rowStr in align1]
    col2 = [rowStr[idx2] for rowStr in align2]
    total = 0.0
    for a in col1:
        for b in col2:
            if a == '-' and b == '-':
                continue  # gap-gap pairs are scored as neutral
            if a == b:
                total += match
            elif a != '-' and b != '-':
                total -= mismatch
            else:
                total -= gap
    return total
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
    """Fill the dynamic-programming table for aligning two alignments.

    Cell (i, j) holds the best score for the first i columns of align1
    against the first j columns of align2. A gap against a whole column
    (moving down or right) costs supergap; a diagonal move adds the
    sum-of-pairs score of the two columns.
    """
    rows = len(align1[0]) + 1
    cols = len(align2[0]) + 1
    table = [[0] * cols for _ in range(rows)]
    for r in range(rows):
        table[r][0] = r * (-supergap)
    for c in range(cols):
        table[0][c] = c * (-supergap)
    for r in range(1, rows):
        for c in range(1, cols):
            fromUp = table[r - 1][c] - supergap
            fromLeft = table[r][c - 1] - supergap
            fromDiag = table[r - 1][c - 1] + sumPairScores(
                align1, align2, r - 1, c - 1, match, mismatch, gap)
            table[r][c] = max(fromUp, fromLeft, fromDiag)
    return table
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):
    """Derive a backtracking matrix ("UP"/"LEFT"/"DIAG") from scoreTable.

    Checks the UP move first, then LEFT, and falls back to DIAG, so ties
    resolve in that fixed order. The match/mismatch/gap parameters are
    accepted for signature symmetry with generateScoreTable but unused here.
    """
    rows = len(align1[0]) + 1
    cols = len(align2[0]) + 1
    backtrack = [[''] * cols for _ in range(rows)]
    for c in range(1, cols):
        backtrack[0][c] = "LEFT"
    for r in range(1, rows):
        backtrack[r][0] = "UP"
    for r in range(1, rows):
        for c in range(1, cols):
            score = scoreTable[r][c]
            if score == scoreTable[r - 1][c] - supergap:
                move = "UP"
            elif score == scoreTable[r][c - 1] - supergap:
                move = "LEFT"
            else:
                move = "DIAG"
            backtrack[r][c] = move
    return backtrack
def backtracker(string, backtrack, orientation):
    """Thread one sequence through the backtrack matrix, inserting gaps.

    orientation selects which axis the sequence lives on: "side" rows
    consume a character on UP/DIAG and gap on LEFT; "top" rows consume a
    character on LEFT/DIAG and gap on UP. Characters are collected in
    reverse and joined at the end.
    """
    pieces = []
    row = len(backtrack) - 1
    col = len(backtrack[0]) - 1
    remaining = string
    while row or col:
        move = backtrack[row][col]
        if move == "UP":
            if orientation == "top":
                pieces.append("-")
            elif orientation == "side":
                pieces.append(str(remaining[-1]))
                remaining = remaining[:-1]
            row -= 1
        elif move == "LEFT":
            if orientation == "side":
                pieces.append("-")
            elif orientation == "top":
                pieces.append(str(remaining[-1]))
                remaining = remaining[:-1]
            col -= 1
        else:  # DIAG: both axes advance, so the character is always kept
            pieces.append(str(remaining[-1]))
            remaining = remaining[:-1]
            row -= 1
            col -= 1
    return "".join(reversed(pieces))
def outputProgressiveAlign(align1, align2, backtrack):
    """Gap every row of both alignments along the backtrack path.

    Rows of align1 are threaded with orientation "side" and rows of
    align2 with "top"; the merged alignment keeps align1's rows first.
    """
    merged = [backtracker(rowStr, backtrack, "side") for rowStr in align1]
    merged += [backtracker(rowStr, backtrack, "top") for rowStr in align2]
    return merged
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
    """Align two existing alignments against each other.

    This is the profile-profile step of CLUSTALW: score the columns,
    backtrack through the table, then emit both alignments with gaps.
    """
    table = generateScoreTable(align1, align2, match, mismatch, gap, supergap)
    moves = progressiveBacktrack(table, align1, align2, match, mismatch, gap, supergap)
    return outputProgressiveAlign(align1, align2, moves)
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
    """Run progressive multiple alignment along a UPGMA guide tree.

    Each leaf gets a singleton alignment of its DNA string; each internal
    node (they follow the leaves in the tree list) aligns its children's
    alignments. Returns the alignment stored at the root (last node).
    """
    for idx, seq in enumerate(dnaStrings):
        guideTree[idx].alignment = [seq]
    for idx in range(len(dnaStrings), len(guideTree)):
        node = guideTree[idx]
        node.alignment = progressiveAlign(
            node.child1.alignment, node.child2.alignment,
            match, mismatch, gap, supergap)
    return guideTree[-1].alignment
# --- Demo driver: run both algorithms on the bundled test data ---
if __name__ == "__main__":
    # Hard-coded UPGMA example: 4 species, symmetric distance matrix.
    print("UPGMA Test")
    mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]
    labels = ["H", "C", "W", "S"]
    tree = upgma(mtx, labels)
    # CLUSTALW demo: build a guide tree from a distance matrix on disk,
    # then progressively align the FASTA sequences along it.
    print("CLUSTALW Test")
    #cats = ["USA", "CHN", "ITA"]
    # NOTE(review): usage implies readMatrixFromFile returns
    # (matrix, labels) in that order -- confirm against FormattingET.
    mtxreturn = FormattingET.readMatrixFromFile("Datasets/Input/Test-Example/distance.mtx")
    mtx1 = mtxreturn[0]
    labels1 = mtxreturn[1]
    t = upgma(mtx1, labels1)
    # Scoring parameters: match reward, mismatch/gap penalties, and the
    # heavier supergap penalty charged for a gap against a whole column.
    match = 1.0
    mismatch = 1.0
    gap = 1.0
    supergap = 6.0
    dnaMap = FormattingET.readDNAStringsFromFile("Datasets/Input/Test-Example/RAW/toy-example.fasta")
    keyvalues = FormattingET.getKeyValues(dnaMap)
    newLabels = keyvalues[0]
    newDnaStrings = keyvalues[1]
    # Reorder the FASTA strings so they match the matrix's label order
    # (presumably what rearrangeStrings does -- verify in FormattingET).
    dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)
    align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)
    FormattingET.writeAlignmentToFile(align, labels1, "Datasets/Output/Test-Example", "toy.aln")
    print(align)
|
normal
|
{
"blob_id": "53cf2dfe3319c39ca6f1dc890eea578fae654b5b",
"index": 8847,
"step-1": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\n<mask token>\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif 
scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\n<mask token>\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\n<mask token>\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, 
mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + 
len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\n<mask token>\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\n<mask token>\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = 
clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, 
orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n\ndef initializeMatrix(m, n):\n mtx = [[(0) for x in range(n)] for y in range(m)]\n return mtx\n\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0] * int(numLeaves)\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n return clusters\n\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n t = [Node] * (2 * numLeaves - 1)\n for i in range(len(t)):\n vx = Node()\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = 'Ancestor species' + str(i)\n vx.num = i\n t[i] = vx\n return t\n\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n return countLeaves(v.child1) + countLeaves(v.child2)\n\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row + 1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n return minRow, minCol, minElement\n\n\n<mask token>\n\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0] * (len(mtx) + 1)\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1 * mtx[row][i] + size2 * mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n mtx.append(newRow)\n for i in range(len(newRow) - 1):\n mtx[i].append(newRow[i])\n return mtx\n\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n for i in range(numLeaves, 2 * numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n 
tree[i].age = min / 2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n clusters = delClusters(clusters, row, col)\n return tree\n\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = [''] * len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n score = 0.0\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n return score\n\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[(0) for j in range(len(align2[0]) + 1)] for i in range(\n len(align1[0]) + 1)]\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * -supergap\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * -supergap\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n up = scoreTable[i - 1][j] - supergap\n left = scoreTable[i][j - 1] - supergap\n diag = scoreTable[i - 1][j - 1] + sumPairScores(align1, align2,\n i - 1, j - 1, match, mismatch, gap)\n scoreTable[i][j] = max(up, left, diag)\n return scoreTable\n\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap,\n supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n for i in range(1, numCols):\n backtrack[0][i] = 'LEFT'\n for i in range(1, numRows):\n backtrack[i][0] = 'UP'\n for i in range(1, numRows):\n for j in range(1, numCols):\n if scoreTable[i][j] == scoreTable[i - 1][j] - supergap:\n backtrack[i][j] = 'UP'\n elif scoreTable[i][j] == scoreTable[i][j - 1] - supergap:\n backtrack[i][j] = 'LEFT'\n else:\n 
backtrack[i][j] = 'DIAG'\n return backtrack\n\n\ndef backtracker(string, backtrack, orientation):\n aligned = ''\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n while row != 0 or col != 0:\n k = len(string)\n if backtrack[row][col] == 'UP':\n if orientation == 'top':\n aligned = '-' + aligned\n elif orientation == 'side':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == 'LEFT':\n if orientation == 'side':\n aligned = '-' + aligned\n elif orientation == 'top':\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n col -= 1\n else:\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n col -= 1\n return aligned\n\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[''] for i in range(len(align1) + len(align2))]\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, 'side')\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, 'top')\n return a\n\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap,\n supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match,\n mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n return opt\n\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n child2 = guideTree[j].child2\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.\n alignment, match, mismatch, gap, supergap)\n return guideTree[len(guideTree) - 1].alignment\n\n\n<mask token>\n",
"step-5": "# Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.\n# Main algos UPGMA and CLUSTALW\nfrom dataclasses import dataclass\nimport FormattingET\n\n@dataclass\nclass Node:\n age: int\n num: int\n label: str\n alignment: []\n def __init__(self, child1=None, child2=None):\n self.child1 = child1\n self.child2 = child2\n\n#UPGMA algos\n\ndef initializeMatrix(m, n):\n mtx = [[0 for x in range(n)] for y in range(m)]\n return mtx\n\ndef initializeClusters(t):\n numNodes = len(t)\n numLeaves = (numNodes + 1) / 2\n clusters = [0]*int(numLeaves)\n\n for i in range(int(numLeaves)):\n clusters[i] = t[i]\n\n return clusters\n\ndef initializeTree(speciesNames):\n numLeaves = len(speciesNames)\n\n t = [Node]*(2*numLeaves - 1)\n\n for i in range(len(t)):\n vx = Node()\n\n if i < numLeaves:\n vx.label = speciesNames[i]\n else:\n vx.label = \"Ancestor species\" + str(i)\n vx.num = i\n t[i] = vx\n\n return t\n\ndef countLeaves(v: Node):\n if v.child1 is None or v.child2 is None:\n return 1\n\n return countLeaves(v.child1) + countLeaves(v.child2)\n\ndef delClusters(clusters, row, col):\n del clusters[col]\n del clusters[row]\n return clusters\n\ndef findMinElement(mtx):\n minRow = 0\n minCol = 1\n minElement = mtx[0][1]\n for row in range(0, len(mtx)):\n for col in range(row+1, len(mtx)):\n if mtx[row][col] < minElement:\n minRow = row\n minCol = col\n minElement = mtx[row][col]\n\n return minRow, minCol, minElement\n\ndef delRowCol(mtx, row, col):\n del mtx[col]\n del mtx[row]\n\n for i in range(len(mtx)):\n del mtx[i][col]\n del mtx[i][row]\n\n return mtx\n\ndef addRowCol(mtx, clusters, row, col):\n newRow = [0]*(len(mtx) + 1)\n\n for i in range(len(newRow) - 1):\n if i != row and i != col:\n size1 = countLeaves(clusters[row])\n size2 = countLeaves(clusters[col])\n avg = (size1*mtx[row][i] + size2*mtx[i][col]) / (size1 + size2)\n newRow[i] = avg\n\n mtx.append(newRow)\n\n for i in range(len(newRow) - 1):\n 
mtx[i].append(newRow[i])\n\n return mtx\n\ndef upgma(mtx, speciesNames):\n tree = initializeTree(speciesNames)\n clusters = initializeClusters(tree)\n numLeaves = len(mtx)\n\n for i in range(numLeaves, 2*numLeaves - 1):\n minElements = findMinElement(mtx)\n row = minElements[0]\n col = minElements[1]\n min = minElements[2]\n\n tree[i].age = min/2\n tree[i].child1 = clusters[row]\n tree[i].child2 = clusters[col]\n\n mtx = addRowCol(mtx, clusters, row, col)\n clusters.append(tree[i])\n mtx = delRowCol(mtx, row, col)\n\n clusters = delClusters(clusters, row, col)\n\n return tree\n\n#CLUSTALW algos\n\ndef sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):\n alignment1 = ['']*len(align1)\n for i in range(len(align1)):\n alignment1[i] = align1[i][idx1]\n\n alignment2 = [''] * len(align2)\n for i in range(len(align2)):\n alignment2[i] = align2[i][idx2]\n\n score = 0.0\n\n for char in alignment1:\n for char2 in alignment2:\n if char == '-' and char2 == '-':\n continue\n elif char == char2:\n score += match\n elif char != '-' and char2 != '-':\n score -= mismatch\n else:\n score -= gap\n\n return score\n\ndef generateScoreTable(align1, align2, match, mismatch, gap, supergap):\n scoreTable = [[0 for j in range(len(align2[0]) + 1)] for i in range(len(align1[0]) + 1)]\n\n for i in range(len(scoreTable)):\n scoreTable[i][0] = i * (-supergap)\n for i in range(len(scoreTable[0])):\n scoreTable[0][i] = i * (-supergap)\n\n for i in range(1, len(align1[0]) + 1):\n for j in range(1, len(align2[0]) + 1):\n\n up = scoreTable[i-1][j] - supergap\n left = scoreTable[i][j-1] - supergap\n diag = scoreTable[i-1][j-1] + sumPairScores(align1, align2, i-1, j-1, match, mismatch, gap)\n\n scoreTable[i][j] = max(up, left, diag)\n\n return scoreTable\n\ndef progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):\n numRows = len(align1[0]) + 1\n numCols = len(align2[0]) + 1\n\n backtrack = [['' for i in range(numCols)] for j in range(numRows)]\n\n for i in 
range(1, numCols):\n backtrack[0][i] = \"LEFT\"\n for i in range(1, numRows):\n backtrack[i][0] = \"UP\"\n\n for i in range(1, numRows):\n for j in range(1, numCols):\n if (scoreTable[i][j] == scoreTable[i-1][j] - supergap):\n backtrack[i][j] = \"UP\"\n elif scoreTable[i][j] == scoreTable[i][j-1] - supergap:\n backtrack[i][j] = \"LEFT\"\n else:\n backtrack[i][j] = \"DIAG\"\n\n return backtrack\n\ndef backtracker(string, backtrack, orientation):\n aligned = \"\"\n\n row = len(backtrack) - 1\n col = len(backtrack[0]) - 1\n\n while(row != 0 or col != 0):\n k = len(string)\n\n if backtrack[row][col] == \"UP\":\n if (orientation == \"top\"):\n aligned = \"-\" + aligned\n elif orientation == \"side\":\n aligned = str(string[k - 1]) + aligned\n string = string[:k - 1]\n row -= 1\n elif backtrack[row][col] == \"LEFT\":\n if (orientation == \"side\"):\n aligned = \"-\" + aligned\n elif orientation == \"top\":\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n col -= 1\n else:\n aligned = str(string[k-1]) + aligned\n string = string[:k-1]\n row -= 1\n col -= 1\n\n return aligned\n\ndef outputProgressiveAlign(align1, align2, backtrack):\n a = [[\"\"] for i in range(len(align1) + len(align2))]\n\n for i in range(len(align1)):\n a[i] = backtracker(align1[i], backtrack, \"side\")\n for j in range(len(align1), len(align2) + len(align1)):\n a[j] = backtracker(align2[j - len(align1)], backtrack, \"top\")\n\n return a\n\ndef progressiveAlign(align1, align2, match, mismatch, gap, supergap):\n scoreTable = generateScoreTable(align1, align2, match, mismatch, gap, supergap)\n backtrack = progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap)\n opt = outputProgressiveAlign(align1, align2, backtrack)\n\n return opt\n\ndef clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):\n\n for i in range(len(dnaStrings)):\n guideTree[i].alignment = [dnaStrings[i]]\n\n for j in range(len(dnaStrings), len(guideTree)):\n child1 = guideTree[j].child1\n 
child2 = guideTree[j].child2\n\n guideTree[j].alignment = progressiveAlign(child1.alignment, child2.alignment, match, mismatch, gap, supergap)\n\n return guideTree[len(guideTree) - 1].alignment\n\n\n#main\nif __name__ == \"__main__\":\n print(\"UPGMA Test\")\n mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]\n labels = [\"H\", \"C\", \"W\", \"S\"]\n tree = upgma(mtx, labels)\n\n print(\"CLUSTALW Test\")\n \n #cats = [\"USA\", \"CHN\", \"ITA\"]\n\n mtxreturn = FormattingET.readMatrixFromFile(\"Datasets/Input/Test-Example/distance.mtx\")\n mtx1 = mtxreturn[0]\n labels1 = mtxreturn[1]\n\n t = upgma(mtx1, labels1)\n\n match = 1.0\n mismatch = 1.0\n gap = 1.0\n supergap = 6.0\n \n dnaMap = FormattingET.readDNAStringsFromFile(\"Datasets/Input/Test-Example/RAW/toy-example.fasta\")\n keyvalues = FormattingET.getKeyValues(dnaMap)\n newLabels = keyvalues[0]\n newDnaStrings = keyvalues[1]\n\n dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)\n align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)\n FormattingET.writeAlignmentToFile(align, labels1, \"Datasets/Output/Test-Example\", \"toy.aln\")\n print(align)\n ",
"step-ids": [
9,
12,
16,
17,
21
]
}
|
[
9,
12,
16,
17,
21
] |
# -*- coding: utf-8 -*-
"""Test(s) for static files
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'
def setup_module(module):
os.environ.update(
SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,
)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
# test non-static page
r = fc.get('myapp')
pkok(
not re.search(
r'googletag',
pkcompat.from_bytes(r.data)
),
'Unexpected injection of googletag data={}',
r.data
)
# test successful injection
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
|
normal
|
{
"blob_id": "65b5db0bc6f23c342138060b7a006ff61e2dcf45",
"index": 3761,
"step-1": "<mask token>\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-2": "<mask token>\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-3": "<mask token>\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test(s) for static files\n\n:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(\n SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,\n )\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n\n # test non-static page\n r = fc.get('myapp')\n pkok(\n not re.search(\n r'googletag',\n pkcompat.from_bytes(r.data)\n ),\n 'Unexpected injection of googletag data={}',\n r.data\n )\n\n # test successful injection\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class UserInfoAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserInfoAdmin(admin.ModelAdmin):
list_display = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
search_fields = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_display_links = ['user_name', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_editable = ['user_profession', 'user_phone', 'user_email',
'user_address']
fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',
'user_profession']}), ('Contact Info', {'fields': ['user_phone',
'user_email', 'user_address']}), ('Social Links', {'fields': [
'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',
'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',
{'fields': ['user_info', 'user_experience', 'user_edu']})
formfield_overrides = {models.TextField: {'widget': TinyMCE}}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserInfoAdmin(admin.ModelAdmin):
list_display = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
search_fields = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_display_links = ['user_name', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_editable = ['user_profession', 'user_phone', 'user_email',
'user_address']
fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',
'user_profession']}), ('Contact Info', {'fields': ['user_phone',
'user_email', 'user_address']}), ('Social Links', {'fields': [
'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',
'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',
{'fields': ['user_info', 'user_experience', 'user_edu']})
formfield_overrides = {models.TextField: {'widget': TinyMCE}}
admin.site.register(UserInfo, UserInfoAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import UserInfo
class UserInfoAdmin(admin.ModelAdmin):
list_display = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
search_fields = ['user_name', 'user_profession', 'user_phone',
'user_email', 'user_address', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_display_links = ['user_name', 'facebook_link', 'instagram_link',
'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',
'stackoverflow_link', 'facebook_link']
list_editable = ['user_profession', 'user_phone', 'user_email',
'user_address']
fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',
'user_profession']}), ('Contact Info', {'fields': ['user_phone',
'user_email', 'user_address']}), ('Social Links', {'fields': [
'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',
'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',
{'fields': ['user_info', 'user_experience', 'user_edu']})
formfield_overrides = {models.TextField: {'widget': TinyMCE}}
admin.site.register(UserInfo, UserInfoAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from django.db import models
from tinymce.widgets import TinyMCE
from .models import UserInfo
# Register your models here.
class UserInfoAdmin(admin.ModelAdmin):
list_display=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
search_fields=[
'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_display_links=[
'user_name',
# 'user_profession',
# 'user_phone',
# 'user_email',
# 'user_address',
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
'facebook_link',
]
list_editable = [
# 'user_name',
'user_profession',
'user_phone',
'user_email',
'user_address',
# 'facebook_link',
# 'instagram_link',
# 'telegram_link',
# 'whatsup_link',
# 'linkedin_link',
# 'github_link',
# 'stackoverflow_link',
# 'facebook_link',
]
fieldsets=(
('Basic Info', {'fields' : [
'user_image',
'user_name',
'user_profession',
],
},
),
(
'Contact Info', {
'fields': [
'user_phone',
'user_email',
'user_address',
],
},
),
(
'Social Links', {
'fields': [
'facebook_link',
'instagram_link',
'telegram_link',
'whatsup_link',
'linkedin_link',
'github_link',
'stackoverflow_link',
],
},
),
(
'Core Info', {
'fields' :[
'user_info',
'user_experience',
'user_edu',
],
},
),
)
formfield_overrides = {
models.TextField: {'widget': TinyMCE}
}
admin.site.register(UserInfo, UserInfoAdmin)
|
flexible
|
{
"blob_id": "15134d7e4036c102bc9d2ba4d321fadd0467100f",
"index": 6637,
"step-1": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-4": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\nfrom .models import UserInfo\n\n\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n search_fields = ['user_name', 'user_profession', 'user_phone',\n 'user_email', 'user_address', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_display_links = ['user_name', 'facebook_link', 'instagram_link',\n 'telegram_link', 'whatsup_link', 'linkedin_link', 'github_link',\n 'stackoverflow_link', 'facebook_link']\n list_editable = ['user_profession', 'user_phone', 'user_email',\n 'user_address']\n fieldsets = ('Basic Info', {'fields': ['user_image', 'user_name',\n 'user_profession']}), ('Contact Info', {'fields': ['user_phone',\n 'user_email', 'user_address']}), ('Social Links', {'fields': [\n 'facebook_link', 'instagram_link', 'telegram_link', 'whatsup_link',\n 'linkedin_link', 'github_link', 'stackoverflow_link']}), ('Core Info',\n {'fields': ['user_info', 'user_experience', 'user_edu']})\n formfield_overrides = {models.TextField: {'widget': TinyMCE}}\n\n\nadmin.site.register(UserInfo, UserInfoAdmin)\n",
"step-5": "from django.contrib import admin\nfrom django.db import models\nfrom tinymce.widgets import TinyMCE\n\nfrom .models import UserInfo\n\n# Register your models here.\nclass UserInfoAdmin(admin.ModelAdmin):\n list_display=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n search_fields=[\n 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_display_links=[\n 'user_name', \n # 'user_profession', \n # 'user_phone', \n # 'user_email', \n # 'user_address', \n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n 'facebook_link', \n ]\n list_editable = [\n # 'user_name', \n 'user_profession', \n 'user_phone', \n 'user_email', \n 'user_address', \n # 'facebook_link', \n # 'instagram_link', \n # 'telegram_link', \n # 'whatsup_link', \n # 'linkedin_link', \n # 'github_link', \n # 'stackoverflow_link', \n # 'facebook_link', \n ]\n\n fieldsets=(\n ('Basic Info', {'fields' : [\n 'user_image', \n 'user_name', \n 'user_profession', \n ],\n },\n ),\n (\n 'Contact Info', {\n 'fields': [\n 'user_phone', \n 'user_email', \n 'user_address', \n ],\n },\n ),\n (\n 'Social Links', {\n 'fields': [\n 'facebook_link', \n 'instagram_link', \n 'telegram_link', \n 'whatsup_link', \n 'linkedin_link', \n 'github_link', \n 'stackoverflow_link', \n ],\n },\n ),\n (\n 'Core Info', {\n 'fields' :[\n 'user_info',\n 'user_experience',\n 'user_edu',\n ],\n },\n ),\n )\n formfield_overrides = {\n models.TextField: {'widget': TinyMCE}\n }\nadmin.site.register(UserInfo, UserInfoAdmin)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sty.use('seaborn')
<|reserved_special_token_0|>
rospy.init_node('graph_poses_extract')
for f in replayFiles:
print('new SLiding Graph')
inlierData = []
rmsData = []
inlierRatio = []
inFile = inNet + '/' + f + '.pose'
with open(inFile, 'r') as fread:
print(f)
data = pickle.load(fread)
print('Loaded')
with open(out + '/' + f + '.inlier', 'w') as outFIle:
pickle.dump(data.getInlierMotion(), outFIle)
print('1')
with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:
pickle.dump(data.getInlierRMS(), outFIle)
print('extracted2')
with open(out + '/' + f + '.tracks', 'w') as outFIle:
pickle.dump(data.getTotalTracks(), outFIle)
print('extracted3')
with open(out + '/' + f + '.delta', 'w') as outFIle:
pickle.dump(data.getDeltaMotion(), outFIle)
print('extracted4')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sty.use('seaborn')
<|reserved_special_token_0|>
out = '/home/ryan/recording/poseGraph/ORB/summary'
inNet = '/home/ryan/recording/poseGraph/ORB'
replayFiles = ['5000_A5', '5000_A6', '5000_A12', '5000_A13', '5000_A14']
rospy.init_node('graph_poses_extract')
for f in replayFiles:
print('new SLiding Graph')
inlierData = []
rmsData = []
inlierRatio = []
inFile = inNet + '/' + f + '.pose'
with open(inFile, 'r') as fread:
print(f)
data = pickle.load(fread)
print('Loaded')
with open(out + '/' + f + '.inlier', 'w') as outFIle:
pickle.dump(data.getInlierMotion(), outFIle)
print('1')
with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:
pickle.dump(data.getInlierRMS(), outFIle)
print('extracted2')
with open(out + '/' + f + '.tracks', 'w') as outFIle:
pickle.dump(data.getTotalTracks(), outFIle)
print('extracted3')
with open(out + '/' + f + '.delta', 'w') as outFIle:
pickle.dump(data.getDeltaMotion(), outFIle)
print('extracted4')
<|reserved_special_token_1|>
from bumblebee.motion import *
from simulation.path import *
from simulation.settings import *
import tf.transformations
from geometry_msgs.msg import TransformStamped, Transform, Quaternion, Vector3
from bumblebee.baseTypes import basicGraph, slidingGraph
from simulation.dataset import stereo_simulator_node
import pickle
import os
import rospy
import time
import scipy.stats.mstats as stat
from scipy.stats import norm, cauchy
import matplotlib.pyplot as plt
import matplotlib.style as sty
from mpl_toolkits.mplot3d import Axes3D
sty.use('seaborn')
from tf import TransformListener, TransformBroadcaster
from tf.transformations import *
import numpy as np
out = '/home/ryan/recording/poseGraph/ORB/summary'
inNet = '/home/ryan/recording/poseGraph/ORB'
replayFiles = ['5000_A5', '5000_A6', '5000_A12', '5000_A13', '5000_A14']
rospy.init_node('graph_poses_extract')
for f in replayFiles:
print('new SLiding Graph')
inlierData = []
rmsData = []
inlierRatio = []
inFile = inNet + '/' + f + '.pose'
with open(inFile, 'r') as fread:
print(f)
data = pickle.load(fread)
print('Loaded')
with open(out + '/' + f + '.inlier', 'w') as outFIle:
pickle.dump(data.getInlierMotion(), outFIle)
print('1')
with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:
pickle.dump(data.getInlierRMS(), outFIle)
print('extracted2')
with open(out + '/' + f + '.tracks', 'w') as outFIle:
pickle.dump(data.getTotalTracks(), outFIle)
print('extracted3')
with open(out + '/' + f + '.delta', 'w') as outFIle:
pickle.dump(data.getDeltaMotion(), outFIle)
print('extracted4')
<|reserved_special_token_1|>
#!/usr/bin/env python
# Batch post-processing script: for each recorded pose-graph pickle, load the
# graph object and re-pickle four derived summaries (inlier motion, inlier
# RMS, track counts, delta motion) into a summary directory.
# Text-mode pickle I/O and the ROS stack suggest this targets Python 2.
from bumblebee.motion import *
from simulation.path import *
from simulation.settings import *
import tf.transformations
from geometry_msgs.msg import TransformStamped,Transform,Quaternion,Vector3
from bumblebee.baseTypes import basicGraph,slidingGraph
from simulation.dataset import stereo_simulator_node
import pickle
import os
import rospy
import time
import scipy.stats.mstats as stat
from scipy.stats import norm,cauchy
import matplotlib.pyplot as plt
import matplotlib.style as sty
from mpl_toolkits.mplot3d import Axes3D
sty.use("seaborn")
from tf import TransformListener,TransformBroadcaster
from tf.transformations import *
import numpy as np
# Hard-coded experiment locations: inputs are <inNet>/<name>.pose, outputs
# go to <out>/<name>.<summary-extension>.
out="/home/ryan/recording/poseGraph/ORB/summary"
inNet="/home/ryan/recording/poseGraph/ORB"
#["5000_A1","5000_A2","5000_A3",
replayFiles=["5000_A5","5000_A6","5000_A12","5000_A13","5000_A14"]#,"/media/ryan/EXTRA/Simulation/50/G_0.3.gauss"]#,"/home/ryan/recording/poseGraph/5000_A2_full.pose"]
# Initialize a ROS node up front -- presumably the unpickled graph objects
# touch ROS state (tf, etc.) on load; TODO confirm.
rospy.init_node("graph_poses_extract")
for f in replayFiles:
    print("new SLiding Graph")
    # NOTE(review): these accumulators are never appended to or read below;
    # the code that used them is commented out at the end of the loop.
    inlierData=[]
    rmsData=[]
    inlierRatio=[]
    inFile=inNet+"/"+f+".pose"
    # Deserialize the recorded pose graph for this run.
    with open(inFile,"r") as fread:
        print(f)
        data=pickle.load(fread)
        print("Loaded")
    # Re-pickle each derived summary to its own file. The getter semantics
    # are defined by the graph class (bumblebee.baseTypes) -- not visible here.
    with open(out+"/"+f+".inlier",'w') as outFIle:
        pickle.dump(data.getInlierMotion(),outFIle)
        print("1")
    with open(out+"/"+f+".inlierRMS",'w') as outFIle:
        pickle.dump(data.getInlierRMS(),outFIle)
        print("extracted2")
    with open(out+"/"+f+".tracks",'w') as outFIle:
        pickle.dump(data.getTotalTracks(),outFIle)
        print("extracted3")
    with open(out+"/"+f+".delta",'w') as outFIle:
        pickle.dump(data.getDeltaMotion(),outFIle)
        print("extracted4")
    # pickle.data.getInlierMotion())
    # print("inlier")
    # rmsData.append(data.getInlierRMS())
    # print("rms")
    # inlierRatio.append(data.getTotalTracks())
    # print("totalTrc")
|
flexible
|
{
"blob_id": "4b3de2d817aa6f8b92d513bcdba612362becefdc",
"index": 9070,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsty.use('seaborn')\n<mask token>\nrospy.init_node('graph_poses_extract')\nfor f in replayFiles:\n print('new SLiding Graph')\n inlierData = []\n rmsData = []\n inlierRatio = []\n inFile = inNet + '/' + f + '.pose'\n with open(inFile, 'r') as fread:\n print(f)\n data = pickle.load(fread)\n print('Loaded')\n with open(out + '/' + f + '.inlier', 'w') as outFIle:\n pickle.dump(data.getInlierMotion(), outFIle)\n print('1')\n with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:\n pickle.dump(data.getInlierRMS(), outFIle)\n print('extracted2')\n with open(out + '/' + f + '.tracks', 'w') as outFIle:\n pickle.dump(data.getTotalTracks(), outFIle)\n print('extracted3')\n with open(out + '/' + f + '.delta', 'w') as outFIle:\n pickle.dump(data.getDeltaMotion(), outFIle)\n print('extracted4')\n",
"step-3": "<mask token>\nsty.use('seaborn')\n<mask token>\nout = '/home/ryan/recording/poseGraph/ORB/summary'\ninNet = '/home/ryan/recording/poseGraph/ORB'\nreplayFiles = ['5000_A5', '5000_A6', '5000_A12', '5000_A13', '5000_A14']\nrospy.init_node('graph_poses_extract')\nfor f in replayFiles:\n print('new SLiding Graph')\n inlierData = []\n rmsData = []\n inlierRatio = []\n inFile = inNet + '/' + f + '.pose'\n with open(inFile, 'r') as fread:\n print(f)\n data = pickle.load(fread)\n print('Loaded')\n with open(out + '/' + f + '.inlier', 'w') as outFIle:\n pickle.dump(data.getInlierMotion(), outFIle)\n print('1')\n with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:\n pickle.dump(data.getInlierRMS(), outFIle)\n print('extracted2')\n with open(out + '/' + f + '.tracks', 'w') as outFIle:\n pickle.dump(data.getTotalTracks(), outFIle)\n print('extracted3')\n with open(out + '/' + f + '.delta', 'w') as outFIle:\n pickle.dump(data.getDeltaMotion(), outFIle)\n print('extracted4')\n",
"step-4": "from bumblebee.motion import *\nfrom simulation.path import *\nfrom simulation.settings import *\nimport tf.transformations\nfrom geometry_msgs.msg import TransformStamped, Transform, Quaternion, Vector3\nfrom bumblebee.baseTypes import basicGraph, slidingGraph\nfrom simulation.dataset import stereo_simulator_node\nimport pickle\nimport os\nimport rospy\nimport time\nimport scipy.stats.mstats as stat\nfrom scipy.stats import norm, cauchy\nimport matplotlib.pyplot as plt\nimport matplotlib.style as sty\nfrom mpl_toolkits.mplot3d import Axes3D\nsty.use('seaborn')\nfrom tf import TransformListener, TransformBroadcaster\nfrom tf.transformations import *\nimport numpy as np\nout = '/home/ryan/recording/poseGraph/ORB/summary'\ninNet = '/home/ryan/recording/poseGraph/ORB'\nreplayFiles = ['5000_A5', '5000_A6', '5000_A12', '5000_A13', '5000_A14']\nrospy.init_node('graph_poses_extract')\nfor f in replayFiles:\n print('new SLiding Graph')\n inlierData = []\n rmsData = []\n inlierRatio = []\n inFile = inNet + '/' + f + '.pose'\n with open(inFile, 'r') as fread:\n print(f)\n data = pickle.load(fread)\n print('Loaded')\n with open(out + '/' + f + '.inlier', 'w') as outFIle:\n pickle.dump(data.getInlierMotion(), outFIle)\n print('1')\n with open(out + '/' + f + '.inlierRMS', 'w') as outFIle:\n pickle.dump(data.getInlierRMS(), outFIle)\n print('extracted2')\n with open(out + '/' + f + '.tracks', 'w') as outFIle:\n pickle.dump(data.getTotalTracks(), outFIle)\n print('extracted3')\n with open(out + '/' + f + '.delta', 'w') as outFIle:\n pickle.dump(data.getDeltaMotion(), outFIle)\n print('extracted4')\n",
"step-5": "#!/usr/bin/env python\n\nfrom bumblebee.motion import *\n\nfrom simulation.path import *\nfrom simulation.settings import *\nimport tf.transformations\nfrom geometry_msgs.msg import TransformStamped,Transform,Quaternion,Vector3\nfrom bumblebee.baseTypes import basicGraph,slidingGraph\nfrom simulation.dataset import stereo_simulator_node\nimport pickle\nimport os\nimport rospy\n\nimport time\nimport scipy.stats.mstats as stat\nfrom scipy.stats import norm,cauchy\nimport matplotlib.pyplot as plt\nimport matplotlib.style as sty\nfrom mpl_toolkits.mplot3d import Axes3D\nsty.use(\"seaborn\")\n\nfrom tf import TransformListener,TransformBroadcaster\nfrom tf.transformations import *\nimport numpy as np\n\n\nout=\"/home/ryan/recording/poseGraph/ORB/summary\"\ninNet=\"/home/ryan/recording/poseGraph/ORB\"\n#[\"5000_A1\",\"5000_A2\",\"5000_A3\",\nreplayFiles=[\"5000_A5\",\"5000_A6\",\"5000_A12\",\"5000_A13\",\"5000_A14\"]#,\"/media/ryan/EXTRA/Simulation/50/G_0.3.gauss\"]#,\"/home/ryan/recording/poseGraph/5000_A2_full.pose\"]\n\nrospy.init_node(\"graph_poses_extract\")\n\n\nfor f in replayFiles:\n print(\"new SLiding Graph\")\n inlierData=[]\n rmsData=[]\n inlierRatio=[]\n inFile=inNet+\"/\"+f+\".pose\"\n with open(inFile,\"r\") as fread:\n print(f)\n data=pickle.load(fread)\n print(\"Loaded\")\n with open(out+\"/\"+f+\".inlier\",'w') as outFIle:\n pickle.dump(data.getInlierMotion(),outFIle)\n print(\"1\")\n with open(out+\"/\"+f+\".inlierRMS\",'w') as outFIle:\n pickle.dump(data.getInlierRMS(),outFIle)\n print(\"extracted2\")\n with open(out+\"/\"+f+\".tracks\",'w') as outFIle:\n pickle.dump(data.getTotalTracks(),outFIle)\n print(\"extracted3\")\n with open(out+\"/\"+f+\".delta\",'w') as outFIle:\n pickle.dump(data.getDeltaMotion(),outFIle)\n print(\"extracted4\")\n # pickle.data.getInlierMotion())\n # print(\"inlier\")\n # rmsData.append(data.getInlierRMS())\n # print(\"rms\")\n # inlierRatio.append(data.getTotalTracks())\n # print(\"totalTrc\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import urllib
def normalize_mac_address(address):
    """Return *address* lower-cased, with ``-`` separators rewritten as ``:``."""
    return ":".join(address.lower().split("-"))
def urlencode(s):
    """Percent-encode *s* as UTF-8 with no characters exempted from quoting."""
    utf8_bytes = s.encode("utf-8")
    return urllib.quote(utf8_bytes, "")
def urlencode_plus(s):
    """Percent-encode *s* (UTF-8) for query strings, i.e. spaces become ``+``."""
    utf8_bytes = s.encode("utf-8")
    return urllib.quote_plus(utf8_bytes, "")
|
normal
|
{
"blob_id": "33b8baf2ca819315eaa5f16c7986390acb4d6efd",
"index": 878,
"step-1": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode('utf-8'), '')\n",
"step-4": "from __future__ import absolute_import, division, unicode_literals\nimport urllib\n\n\ndef normalize_mac_address(address):\n return address.lower().replace('-', ':')\n\n\ndef urlencode(s):\n return urllib.quote(s.encode('utf-8'), '')\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode('utf-8'), '')\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport urllib\n\n\ndef normalize_mac_address(address):\n return address.lower().replace(\"-\", \":\")\n\n\ndef urlencode(s):\n return urllib.quote(s.encode(\"utf-8\"), \"\")\n\n\ndef urlencode_plus(s):\n return urllib.quote_plus(s.encode(\"utf-8\"), \"\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MsecDebugger(DebuggerBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def debugger_app(self):
"""
Returns the name of the debugger application to use in this class
"""
typical = (
'C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe')
if os.path.exists(typical):
return typical
return 'cdb'
def debugger_test(self):
"""
Returns a command line (as list) that can be run via subprocess.call
to confirm whether the debugger is on the path.
"""
return [self.debugger_app(), '-version']
def _get_cmdline(self, outfile):
cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
args = []
args.append(self.debugger_app())
args.append('-amsec.dll')
if hasattr(self, 'debugheap') and self.debugheap:
pass
else:
args.extend(('-hd', '-xd', 'gp'))
args.extend(('-logo', outfile))
args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
for self.exception_depth in xrange(0, self.exception_depth):
cdb_command = 'g;' + cdb_command
args.append(cdb_command)
args.append(self.program)
args.extend(self.cmd_args)
for l in pformat(args).splitlines():
logger.debug('dbg_args: %s', l)
return args
def _find_debug_target(self, exename, trycount=5):
pid = None
attempts = 0
foundpid = False
if self.watchcpu:
while attempts < trycount and not foundpid:
for process in self.wmiInterface.Win32_Process(name=exename):
pid = process.ProcessID
logger.debug('Found %s PID: %s', exename, pid)
foundpid = True
attempts += 1
if not foundpid and attempts < trycount:
logger.debug('%s not seen yet. Retrying...', exename)
time.sleep(0.1)
if not pid:
logger.debug('Cannot find %s child process!', exename)
return pid
def run_with_timer(self):
exename = os.path.basename(self.program)
process_info = {}
child_pid = None
done = False
started = False
args = self._get_cmdline(self.outfile)
p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.
devnull, 'w'), universal_newlines=True)
self.savedpid = p.pid
child_pid = self._find_debug_target(exename, trycount=5)
if child_pid is None and self.watchcpu:
logger.debug('Bailing on debugger iteration')
self.kill(self.savedpid, 99)
return
self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
self.t.start()
if self.watchcpu:
while p.poll() is None and not done and child_pid:
for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(
IDProcess=child_pid):
n1, d1 = long(proc.PercentProcessorTime), long(proc.
Timestamp_Sys100NS)
n0, d0 = process_info.get(child_pid, (0, 0))
try:
percent_processor_time = float(n1 - n0) / float(d1 - d0
) * 100.0
except ZeroDivisionError:
percent_processor_time = 0.0
process_info[child_pid] = n1, d1
logger.debug('Process %s CPU usage: %s', child_pid,
percent_processor_time)
if percent_processor_time < 1e-10:
if started:
logger.debug(
'killing cdb session for %s due to CPU inactivity'
, child_pid)
done = True
self.kill(self.savedpid, 99)
else:
started = True
if not done:
time.sleep(0.2)
else:
p.wait()
self.t.cancel()
def go(self):
"""run cdb and process output"""
if self.exception_depth > 0:
self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self
.exception_depth) + os.path.splitext(self.outfile)[1]
self.run_with_timer()
if not os.path.exists(self.outfile):
open(self.outfile, 'w').close()
parsed = MsecFile(self.outfile)
for l in pformat(parsed.__dict__).splitlines():
logger.debug('parsed: %s', l)
return parsed
def __exit__(self, etype, value, traceback):
if self.t:
logger.debug('Canceling timer...')
self.t.cancel()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MsecDebugger(DebuggerBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def kill(self, pid, returncode):
"""kill function for Win32"""
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 1, pid)
ret = kernel32.TerminateProcess(handle, returncode)
kernel32.CloseHandle(handle)
return 0 != ret
def debugger_app(self):
"""
Returns the name of the debugger application to use in this class
"""
typical = (
'C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe')
if os.path.exists(typical):
return typical
return 'cdb'
def debugger_test(self):
"""
Returns a command line (as list) that can be run via subprocess.call
to confirm whether the debugger is on the path.
"""
return [self.debugger_app(), '-version']
def _get_cmdline(self, outfile):
cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
args = []
args.append(self.debugger_app())
args.append('-amsec.dll')
if hasattr(self, 'debugheap') and self.debugheap:
pass
else:
args.extend(('-hd', '-xd', 'gp'))
args.extend(('-logo', outfile))
args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
for self.exception_depth in xrange(0, self.exception_depth):
cdb_command = 'g;' + cdb_command
args.append(cdb_command)
args.append(self.program)
args.extend(self.cmd_args)
for l in pformat(args).splitlines():
logger.debug('dbg_args: %s', l)
return args
def _find_debug_target(self, exename, trycount=5):
pid = None
attempts = 0
foundpid = False
if self.watchcpu:
while attempts < trycount and not foundpid:
for process in self.wmiInterface.Win32_Process(name=exename):
pid = process.ProcessID
logger.debug('Found %s PID: %s', exename, pid)
foundpid = True
attempts += 1
if not foundpid and attempts < trycount:
logger.debug('%s not seen yet. Retrying...', exename)
time.sleep(0.1)
if not pid:
logger.debug('Cannot find %s child process!', exename)
return pid
def run_with_timer(self):
exename = os.path.basename(self.program)
process_info = {}
child_pid = None
done = False
started = False
args = self._get_cmdline(self.outfile)
p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.
devnull, 'w'), universal_newlines=True)
self.savedpid = p.pid
child_pid = self._find_debug_target(exename, trycount=5)
if child_pid is None and self.watchcpu:
logger.debug('Bailing on debugger iteration')
self.kill(self.savedpid, 99)
return
self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
self.t.start()
if self.watchcpu:
while p.poll() is None and not done and child_pid:
for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(
IDProcess=child_pid):
n1, d1 = long(proc.PercentProcessorTime), long(proc.
Timestamp_Sys100NS)
n0, d0 = process_info.get(child_pid, (0, 0))
try:
percent_processor_time = float(n1 - n0) / float(d1 - d0
) * 100.0
except ZeroDivisionError:
percent_processor_time = 0.0
process_info[child_pid] = n1, d1
logger.debug('Process %s CPU usage: %s', child_pid,
percent_processor_time)
if percent_processor_time < 1e-10:
if started:
logger.debug(
'killing cdb session for %s due to CPU inactivity'
, child_pid)
done = True
self.kill(self.savedpid, 99)
else:
started = True
if not done:
time.sleep(0.2)
else:
p.wait()
self.t.cancel()
def go(self):
"""run cdb and process output"""
if self.exception_depth > 0:
self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self
.exception_depth) + os.path.splitext(self.outfile)[1]
self.run_with_timer()
if not os.path.exists(self.outfile):
open(self.outfile, 'w').close()
parsed = MsecFile(self.outfile)
for l in pformat(parsed.__dict__).splitlines():
logger.debug('parsed: %s', l)
return parsed
def __exit__(self, etype, value, traceback):
if self.t:
logger.debug('Canceling timer...')
self.t.cancel()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def factory(options):
return MsecDebugger(options)
class MsecDebugger(DebuggerBase):
_platform = 'Windows'
_key = 'msec'
_ext = 'msec'
def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu,
exception_depth=0, cdb_command='!exploitable -v', debug_heap=False,
**options):
DebuggerBase.__init__(self, program, cmd_args, outfile_base,
timeout, **options)
self.exception_depth = exception_depth
self.watchcpu = watchcpu
if watchcpu:
self.wmiInterface = wmi.WMI()
self.t = None
self.savedpid = None
self.cdb_command = cdb_command
self.debugheap = debug_heap
def kill(self, pid, returncode):
"""kill function for Win32"""
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 1, pid)
ret = kernel32.TerminateProcess(handle, returncode)
kernel32.CloseHandle(handle)
return 0 != ret
def debugger_app(self):
"""
Returns the name of the debugger application to use in this class
"""
typical = (
'C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe')
if os.path.exists(typical):
return typical
return 'cdb'
def debugger_test(self):
"""
Returns a command line (as list) that can be run via subprocess.call
to confirm whether the debugger is on the path.
"""
return [self.debugger_app(), '-version']
def _get_cmdline(self, outfile):
cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
args = []
args.append(self.debugger_app())
args.append('-amsec.dll')
if hasattr(self, 'debugheap') and self.debugheap:
pass
else:
args.extend(('-hd', '-xd', 'gp'))
args.extend(('-logo', outfile))
args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
for self.exception_depth in xrange(0, self.exception_depth):
cdb_command = 'g;' + cdb_command
args.append(cdb_command)
args.append(self.program)
args.extend(self.cmd_args)
for l in pformat(args).splitlines():
logger.debug('dbg_args: %s', l)
return args
def _find_debug_target(self, exename, trycount=5):
pid = None
attempts = 0
foundpid = False
if self.watchcpu:
while attempts < trycount and not foundpid:
for process in self.wmiInterface.Win32_Process(name=exename):
pid = process.ProcessID
logger.debug('Found %s PID: %s', exename, pid)
foundpid = True
attempts += 1
if not foundpid and attempts < trycount:
logger.debug('%s not seen yet. Retrying...', exename)
time.sleep(0.1)
if not pid:
logger.debug('Cannot find %s child process!', exename)
return pid
def run_with_timer(self):
exename = os.path.basename(self.program)
process_info = {}
child_pid = None
done = False
started = False
args = self._get_cmdline(self.outfile)
p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.
devnull, 'w'), universal_newlines=True)
self.savedpid = p.pid
child_pid = self._find_debug_target(exename, trycount=5)
if child_pid is None and self.watchcpu:
logger.debug('Bailing on debugger iteration')
self.kill(self.savedpid, 99)
return
self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
self.t.start()
if self.watchcpu:
while p.poll() is None and not done and child_pid:
for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(
IDProcess=child_pid):
n1, d1 = long(proc.PercentProcessorTime), long(proc.
Timestamp_Sys100NS)
n0, d0 = process_info.get(child_pid, (0, 0))
try:
percent_processor_time = float(n1 - n0) / float(d1 - d0
) * 100.0
except ZeroDivisionError:
percent_processor_time = 0.0
process_info[child_pid] = n1, d1
logger.debug('Process %s CPU usage: %s', child_pid,
percent_processor_time)
if percent_processor_time < 1e-10:
if started:
logger.debug(
'killing cdb session for %s due to CPU inactivity'
, child_pid)
done = True
self.kill(self.savedpid, 99)
else:
started = True
if not done:
time.sleep(0.2)
else:
p.wait()
self.t.cancel()
def go(self):
"""run cdb and process output"""
if self.exception_depth > 0:
self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self
.exception_depth) + os.path.splitext(self.outfile)[1]
self.run_with_timer()
if not os.path.exists(self.outfile):
open(self.outfile, 'w').close()
parsed = MsecFile(self.outfile)
for l in pformat(parsed.__dict__).splitlines():
logger.debug('parsed: %s', l)
return parsed
def __exit__(self, etype, value, traceback):
if self.t:
logger.debug('Canceling timer...')
self.t.cancel()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import ctypes
import logging
import os
from pprint import pformat
from subprocess import Popen
from threading import Timer
import time
from certfuzz.debuggers.debugger_base import Debugger as DebuggerBase
from certfuzz.debuggers.output_parsers.msec_file import MsecFile
import sys
if sys.platform.startswith('win'):
import wmi
logger = logging.getLogger(__name__)
def factory(options):
return MsecDebugger(options)
class MsecDebugger(DebuggerBase):
_platform = 'Windows'
_key = 'msec'
_ext = 'msec'
def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu,
exception_depth=0, cdb_command='!exploitable -v', debug_heap=False,
**options):
DebuggerBase.__init__(self, program, cmd_args, outfile_base,
timeout, **options)
self.exception_depth = exception_depth
self.watchcpu = watchcpu
if watchcpu:
self.wmiInterface = wmi.WMI()
self.t = None
self.savedpid = None
self.cdb_command = cdb_command
self.debugheap = debug_heap
def kill(self, pid, returncode):
"""kill function for Win32"""
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 1, pid)
ret = kernel32.TerminateProcess(handle, returncode)
kernel32.CloseHandle(handle)
return 0 != ret
def debugger_app(self):
"""
Returns the name of the debugger application to use in this class
"""
typical = (
'C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe')
if os.path.exists(typical):
return typical
return 'cdb'
def debugger_test(self):
"""
Returns a command line (as list) that can be run via subprocess.call
to confirm whether the debugger is on the path.
"""
return [self.debugger_app(), '-version']
def _get_cmdline(self, outfile):
cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
args = []
args.append(self.debugger_app())
args.append('-amsec.dll')
if hasattr(self, 'debugheap') and self.debugheap:
pass
else:
args.extend(('-hd', '-xd', 'gp'))
args.extend(('-logo', outfile))
args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
for self.exception_depth in xrange(0, self.exception_depth):
cdb_command = 'g;' + cdb_command
args.append(cdb_command)
args.append(self.program)
args.extend(self.cmd_args)
for l in pformat(args).splitlines():
logger.debug('dbg_args: %s', l)
return args
def _find_debug_target(self, exename, trycount=5):
pid = None
attempts = 0
foundpid = False
if self.watchcpu:
while attempts < trycount and not foundpid:
for process in self.wmiInterface.Win32_Process(name=exename):
pid = process.ProcessID
logger.debug('Found %s PID: %s', exename, pid)
foundpid = True
attempts += 1
if not foundpid and attempts < trycount:
logger.debug('%s not seen yet. Retrying...', exename)
time.sleep(0.1)
if not pid:
logger.debug('Cannot find %s child process!', exename)
return pid
def run_with_timer(self):
exename = os.path.basename(self.program)
process_info = {}
child_pid = None
done = False
started = False
args = self._get_cmdline(self.outfile)
p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.
devnull, 'w'), universal_newlines=True)
self.savedpid = p.pid
child_pid = self._find_debug_target(exename, trycount=5)
if child_pid is None and self.watchcpu:
logger.debug('Bailing on debugger iteration')
self.kill(self.savedpid, 99)
return
self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
self.t.start()
if self.watchcpu:
while p.poll() is None and not done and child_pid:
for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(
IDProcess=child_pid):
n1, d1 = long(proc.PercentProcessorTime), long(proc.
Timestamp_Sys100NS)
n0, d0 = process_info.get(child_pid, (0, 0))
try:
percent_processor_time = float(n1 - n0) / float(d1 - d0
) * 100.0
except ZeroDivisionError:
percent_processor_time = 0.0
process_info[child_pid] = n1, d1
logger.debug('Process %s CPU usage: %s', child_pid,
percent_processor_time)
if percent_processor_time < 1e-10:
if started:
logger.debug(
'killing cdb session for %s due to CPU inactivity'
, child_pid)
done = True
self.kill(self.savedpid, 99)
else:
started = True
if not done:
time.sleep(0.2)
else:
p.wait()
self.t.cancel()
def go(self):
"""run cdb and process output"""
if self.exception_depth > 0:
self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self
.exception_depth) + os.path.splitext(self.outfile)[1]
self.run_with_timer()
if not os.path.exists(self.outfile):
open(self.outfile, 'w').close()
parsed = MsecFile(self.outfile)
for l in pformat(parsed.__dict__).splitlines():
logger.debug('parsed: %s', l)
return parsed
def __exit__(self, etype, value, traceback):
if self.t:
logger.debug('Canceling timer...')
self.t.cancel()
<|reserved_special_token_1|>
"""This module runs cdb on a process and !exploitable on any exceptions.
"""
import ctypes
import logging
import os
from pprint import pformat
from subprocess import Popen
from threading import Timer
import time
from certfuzz.debuggers.debugger_base import Debugger as DebuggerBase
from certfuzz.debuggers.output_parsers.msec_file import MsecFile
import sys
if sys.platform.startswith('win'):
import wmi
logger = logging.getLogger(__name__)
def factory(options):
    """Build the msec debugger backend for the given options.

    NOTE(review): ``MsecDebugger.__init__`` visibly expects ``(program,
    cmd_args, outfile_base, timeout, watchcpu, ...)``; calling it with a
    single positional ``options`` object would raise ``TypeError`` -- confirm
    whether this factory is actually used.
    """
    debugger_class = MsecDebugger
    return debugger_class(options)
class MsecDebugger(DebuggerBase):
    """Windows debugger backend: runs the target under Microsoft cdb with the
    MSEC ``!exploitable`` extension (msec.dll) loaded, logs the session to a
    file, and parses that log into a :class:`MsecFile`.

    Instances are used as context managers by callers (only ``__exit__`` is
    defined here; ``__enter__`` presumably comes from ``DebuggerBase`` --
    TODO confirm).
    """
    # Registration metadata consumed by the debugger framework.
    _platform = 'Windows'
    _key = 'msec'
    _ext = 'msec'
    def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu, exception_depth=0, cdb_command='!exploitable -v', debug_heap=False, ** options):
        """Configure (but do not start) a cdb session.

        :param program: path to the target executable
        :param cmd_args: argument list appended after the target on the cdb
            command line
        :param outfile_base: base name used by DebuggerBase for the cdb log
        :param timeout: seconds before the watchdog timer hard-kills cdb
        :param watchcpu: if true, poll the target's CPU usage via WMI and end
            the session early once it goes idle
        :param exception_depth: number of exceptions to pass (cdb ``g``)
            before running *cdb_command*
        :param cdb_command: triage command executed at the final debugger stop
        :param debug_heap: if true, omit the ``-hd``/``-xd gp`` flags so the
            NT debug heap stays enabled
        """
        DebuggerBase.__init__(
            self, program, cmd_args, outfile_base, timeout, **options)
        self.exception_depth = exception_depth
        self.watchcpu = watchcpu
        if watchcpu:
            # wmi is only imported on win32; only needed for CPU polling.
            self.wmiInterface = wmi.WMI()
        self.t = None           # watchdog Timer, armed in run_with_timer()
        self.savedpid = None    # PID of the cdb process itself
        self.cdb_command = cdb_command
        self.debugheap = debug_heap
    def kill(self, pid, returncode):
        """kill function for Win32: force-terminate *pid* with *returncode*."""
        kernel32 = ctypes.windll.kernel32
        # OpenProcess(1, ...): 1 == PROCESS_TERMINATE access right.
        handle = kernel32.OpenProcess(1, 1, pid)
        ret = kernel32.TerminateProcess(handle, returncode)
        kernel32.CloseHandle(handle)
        # TerminateProcess returns nonzero on success.
        return (0 != ret)
    def debugger_app(self):
        '''
        Returns the name of the debugger application to use in this class:
        the typical Debugging Tools install path when present, else bare
        'cdb' to be resolved via PATH.
        '''
        typical = "C:\\Program Files\\Debugging Tools for Windows (x86)\\cdb.exe"
        if os.path.exists(typical):
            return typical
        return 'cdb'
    def debugger_test(self):
        '''
        Returns a command line (as list) that can be run via subprocess.call
        to confirm whether the debugger is on the path.
        '''
        return [self.debugger_app(), '-version']
    def _get_cmdline(self, outfile):
        """Build the full cdb argument list for one debugging session."""
        # Triage command executed at the final stop: 'r' dumps registers,
        # then self.cdb_command (default !exploitable), then 'q' quits cdb.
        cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command
        args = []
        args.append(self.debugger_app())
        # Load the MSEC !exploitable extension DLL.
        args.append('-amsec.dll')
        if hasattr(self, 'debugheap') and self.debugheap:
            # do not use hd, xd options if debugheap is set
            pass
        else:
            # -hd / -xd gp: run without the NT debug heap -- presumably to
            # keep heap layout closer to a normal (non-debugged) launch;
            # TODO confirm flag semantics against the cdb docs.
            args.extend(('-hd', '-xd', 'gp'))
        # Write cdb's session output to *outfile*.
        args.extend(('-logo', outfile))
        args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))
        # Prefix one 'g' (go) per exception to skip before triage runs.
        # NOTE(review): this loop rebinds self.exception_depth as its loop
        # variable, so afterwards the attribute holds depth-1 (or is
        # unchanged when depth == 0). Looks accidental -- confirm before
        # relying on self.exception_depth after this call.
        for self.exception_depth in xrange(0, self.exception_depth):
            cdb_command = 'g;' + cdb_command
        args.append(cdb_command)
        args.append(self.program)
        args.extend(self.cmd_args)
        for l in pformat(args).splitlines():
            logger.debug('dbg_args: %s', l)
        return args
    def _find_debug_target(self, exename, trycount=5):
        """Poll WMI up to *trycount* times for a process named *exename*.

        Returns its PID, or None when not found -- or immediately when CPU
        watching is disabled (the PID is only needed for polling).
        """
        pid = None
        attempts = 0
        foundpid = False
        if self.watchcpu:
            while attempts < trycount and not foundpid:
                for process in self.wmiInterface.Win32_Process(name=exename):
                    # TODO: What if there's more than one?
                    pid = process.ProcessID
                    logger.debug('Found %s PID: %s', exename, pid)
                    foundpid = True
                attempts += 1
                if not foundpid and attempts < trycount:
                    logger.debug('%s not seen yet. Retrying...', exename)
                    time.sleep(0.1)
            if not pid:
                logger.debug('Cannot find %s child process!', exename)
        return pid
    def run_with_timer(self):
        """Launch cdb, arm the kill timer, and optionally end the session
        early once the target's CPU usage drops to (near) zero."""
        # TODO: replace this with subp.run_with_timer()
        exename = os.path.basename(self.program)
        process_info = {}
        child_pid = None
        done = False
        started = False
        args = self._get_cmdline(self.outfile)
        # Discard cdb's console output; the useful log goes to self.outfile
        # via -logo. NOTE(review): the two devnull handles are never closed
        # explicitly (left to garbage collection).
        p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'),
                  universal_newlines=True)
        self.savedpid = p.pid
        child_pid = self._find_debug_target(exename, trycount=5)
        if child_pid is None and self.watchcpu:
            # Target never appeared under WMI: abort this iteration and
            # clean up the cdb process.
            logger.debug('Bailing on debugger iteration')
            self.kill(self.savedpid, 99)
            return
        # create a timer that calls kill() when it expires
        self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])
        self.t.start()
        if self.watchcpu:
            # This is a race. In some cases, a GUI app could be done before we can even measure it
            # TODO: Do something about it
            while p.poll() is None and not done and child_pid:
                for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(IDProcess=child_pid):
                    # Raw perf counters: CPU % is the delta of processor
                    # time over the delta of the 100ns system timestamp.
                    n1, d1 = long(proc.PercentProcessorTime), long(
                        proc.Timestamp_Sys100NS)
                    n0, d0 = process_info.get(child_pid, (0, 0))
                    try:
                        percent_processor_time = (
                            float(n1 - n0) / float(d1 - d0)) * 100.0
                    except ZeroDivisionError:
                        # Two samples with identical timestamps.
                        percent_processor_time = 0.0
                    process_info[child_pid] = (n1, d1)
                    logger.debug(
                        'Process %s CPU usage: %s', child_pid, percent_processor_time)
                    if percent_processor_time < 0.0000000001:
                        if started:
                            logger.debug(
                                'killing cdb session for %s due to CPU inactivity', child_pid)
                            done = True
                            self.kill(self.savedpid, 99)
                        else:
                            # First near-zero sample arms the kill; the next
                            # near-zero sample triggers it. (Note 'started'
                            # is set on an idle reading, not on observed CPU
                            # activity, despite its name.)
                            started = True
                if not done:
                    time.sleep(0.2)
        else:
            # No CPU watching: just block until cdb exits or the timer fires.
            p.wait()
        # Session ended (or was killed): disarm the watchdog.
        self.t.cancel()
    def go(self):
        """run cdb and process output; returns the parsed MsecFile."""
        # For exceptions beyond the first one, put the handled exception number
        # in the name (foo.msec -> foo.e1.msec, and so on).
        if self.exception_depth > 0:
            self.outfile = os.path.splitext(self.outfile)[
                0] + '.e' + str(self.exception_depth) + os.path.splitext(self.outfile)[1]
        self.run_with_timer()
        if not os.path.exists(self.outfile):
            # touch it if it doesn't exist, so MsecFile always has a file to open
            open(self.outfile, 'w').close()
        parsed = MsecFile(self.outfile)
        for l in pformat(parsed.__dict__).splitlines():
            logger.debug('parsed: %s', l)
        return parsed
    def __exit__(self, etype, value, traceback):
        # Context-manager teardown: make sure the kill timer cannot fire
        # after the session is over.
        if self.t:
            logger.debug('Canceling timer...')
            self.t.cancel()
# END MsecDebugger
|
flexible
|
{
"blob_id": "706f8d83bc9b4fab6f6d365c047c33913daece61",
"index": 5014,
"step-1": "<mask token>\n\n\nclass MsecDebugger(DebuggerBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def debugger_app(self):\n \"\"\"\n Returns the name of the debugger application to use in this class\n \"\"\"\n typical = (\n 'C:\\\\Program Files\\\\Debugging Tools for Windows (x86)\\\\cdb.exe')\n if os.path.exists(typical):\n return typical\n return 'cdb'\n\n def debugger_test(self):\n \"\"\"\n Returns a command line (as list) that can be run via subprocess.call\n to confirm whether the debugger is on the path.\n \"\"\"\n return [self.debugger_app(), '-version']\n\n def _get_cmdline(self, outfile):\n cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command\n args = []\n args.append(self.debugger_app())\n args.append('-amsec.dll')\n if hasattr(self, 'debugheap') and self.debugheap:\n pass\n else:\n args.extend(('-hd', '-xd', 'gp'))\n args.extend(('-logo', outfile))\n args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))\n for self.exception_depth in xrange(0, self.exception_depth):\n cdb_command = 'g;' + cdb_command\n args.append(cdb_command)\n args.append(self.program)\n args.extend(self.cmd_args)\n for l in pformat(args).splitlines():\n logger.debug('dbg_args: %s', l)\n return args\n\n def _find_debug_target(self, exename, trycount=5):\n pid = None\n attempts = 0\n foundpid = False\n if self.watchcpu:\n while attempts < trycount and not foundpid:\n for process in self.wmiInterface.Win32_Process(name=exename):\n pid = process.ProcessID\n logger.debug('Found %s PID: %s', exename, pid)\n foundpid = True\n attempts += 1\n if not foundpid and attempts < trycount:\n logger.debug('%s not seen yet. 
Retrying...', exename)\n time.sleep(0.1)\n if not pid:\n logger.debug('Cannot find %s child process!', exename)\n return pid\n\n def run_with_timer(self):\n exename = os.path.basename(self.program)\n process_info = {}\n child_pid = None\n done = False\n started = False\n args = self._get_cmdline(self.outfile)\n p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.\n devnull, 'w'), universal_newlines=True)\n self.savedpid = p.pid\n child_pid = self._find_debug_target(exename, trycount=5)\n if child_pid is None and self.watchcpu:\n logger.debug('Bailing on debugger iteration')\n self.kill(self.savedpid, 99)\n return\n self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])\n self.t.start()\n if self.watchcpu:\n while p.poll() is None and not done and child_pid:\n for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(\n IDProcess=child_pid):\n n1, d1 = long(proc.PercentProcessorTime), long(proc.\n Timestamp_Sys100NS)\n n0, d0 = process_info.get(child_pid, (0, 0))\n try:\n percent_processor_time = float(n1 - n0) / float(d1 - d0\n ) * 100.0\n except ZeroDivisionError:\n percent_processor_time = 0.0\n process_info[child_pid] = n1, d1\n logger.debug('Process %s CPU usage: %s', child_pid,\n percent_processor_time)\n if percent_processor_time < 1e-10:\n if started:\n logger.debug(\n 'killing cdb session for %s due to CPU inactivity'\n , child_pid)\n done = True\n self.kill(self.savedpid, 99)\n else:\n started = True\n if not done:\n time.sleep(0.2)\n else:\n p.wait()\n self.t.cancel()\n\n def go(self):\n \"\"\"run cdb and process output\"\"\"\n if self.exception_depth > 0:\n self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self\n .exception_depth) + os.path.splitext(self.outfile)[1]\n self.run_with_timer()\n if not os.path.exists(self.outfile):\n open(self.outfile, 'w').close()\n parsed = MsecFile(self.outfile)\n for l in pformat(parsed.__dict__).splitlines():\n logger.debug('parsed: %s', l)\n return parsed\n\n def __exit__(self, 
etype, value, traceback):\n if self.t:\n logger.debug('Canceling timer...')\n self.t.cancel()\n",
"step-2": "<mask token>\n\n\nclass MsecDebugger(DebuggerBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def kill(self, pid, returncode):\n \"\"\"kill function for Win32\"\"\"\n kernel32 = ctypes.windll.kernel32\n handle = kernel32.OpenProcess(1, 1, pid)\n ret = kernel32.TerminateProcess(handle, returncode)\n kernel32.CloseHandle(handle)\n return 0 != ret\n\n def debugger_app(self):\n \"\"\"\n Returns the name of the debugger application to use in this class\n \"\"\"\n typical = (\n 'C:\\\\Program Files\\\\Debugging Tools for Windows (x86)\\\\cdb.exe')\n if os.path.exists(typical):\n return typical\n return 'cdb'\n\n def debugger_test(self):\n \"\"\"\n Returns a command line (as list) that can be run via subprocess.call\n to confirm whether the debugger is on the path.\n \"\"\"\n return [self.debugger_app(), '-version']\n\n def _get_cmdline(self, outfile):\n cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command\n args = []\n args.append(self.debugger_app())\n args.append('-amsec.dll')\n if hasattr(self, 'debugheap') and self.debugheap:\n pass\n else:\n args.extend(('-hd', '-xd', 'gp'))\n args.extend(('-logo', outfile))\n args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))\n for self.exception_depth in xrange(0, self.exception_depth):\n cdb_command = 'g;' + cdb_command\n args.append(cdb_command)\n args.append(self.program)\n args.extend(self.cmd_args)\n for l in pformat(args).splitlines():\n logger.debug('dbg_args: %s', l)\n return args\n\n def _find_debug_target(self, exename, trycount=5):\n pid = None\n attempts = 0\n foundpid = False\n if self.watchcpu:\n while attempts < trycount and not foundpid:\n for process in self.wmiInterface.Win32_Process(name=exename):\n pid = process.ProcessID\n logger.debug('Found %s PID: %s', exename, pid)\n foundpid = True\n attempts += 1\n if not foundpid and attempts < trycount:\n logger.debug('%s not seen yet. 
Retrying...', exename)\n time.sleep(0.1)\n if not pid:\n logger.debug('Cannot find %s child process!', exename)\n return pid\n\n def run_with_timer(self):\n exename = os.path.basename(self.program)\n process_info = {}\n child_pid = None\n done = False\n started = False\n args = self._get_cmdline(self.outfile)\n p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.\n devnull, 'w'), universal_newlines=True)\n self.savedpid = p.pid\n child_pid = self._find_debug_target(exename, trycount=5)\n if child_pid is None and self.watchcpu:\n logger.debug('Bailing on debugger iteration')\n self.kill(self.savedpid, 99)\n return\n self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])\n self.t.start()\n if self.watchcpu:\n while p.poll() is None and not done and child_pid:\n for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(\n IDProcess=child_pid):\n n1, d1 = long(proc.PercentProcessorTime), long(proc.\n Timestamp_Sys100NS)\n n0, d0 = process_info.get(child_pid, (0, 0))\n try:\n percent_processor_time = float(n1 - n0) / float(d1 - d0\n ) * 100.0\n except ZeroDivisionError:\n percent_processor_time = 0.0\n process_info[child_pid] = n1, d1\n logger.debug('Process %s CPU usage: %s', child_pid,\n percent_processor_time)\n if percent_processor_time < 1e-10:\n if started:\n logger.debug(\n 'killing cdb session for %s due to CPU inactivity'\n , child_pid)\n done = True\n self.kill(self.savedpid, 99)\n else:\n started = True\n if not done:\n time.sleep(0.2)\n else:\n p.wait()\n self.t.cancel()\n\n def go(self):\n \"\"\"run cdb and process output\"\"\"\n if self.exception_depth > 0:\n self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self\n .exception_depth) + os.path.splitext(self.outfile)[1]\n self.run_with_timer()\n if not os.path.exists(self.outfile):\n open(self.outfile, 'w').close()\n parsed = MsecFile(self.outfile)\n for l in pformat(parsed.__dict__).splitlines():\n logger.debug('parsed: %s', l)\n return parsed\n\n def __exit__(self, 
etype, value, traceback):\n if self.t:\n logger.debug('Canceling timer...')\n self.t.cancel()\n",
"step-3": "<mask token>\n\n\ndef factory(options):\n return MsecDebugger(options)\n\n\nclass MsecDebugger(DebuggerBase):\n _platform = 'Windows'\n _key = 'msec'\n _ext = 'msec'\n\n def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu,\n exception_depth=0, cdb_command='!exploitable -v', debug_heap=False,\n **options):\n DebuggerBase.__init__(self, program, cmd_args, outfile_base,\n timeout, **options)\n self.exception_depth = exception_depth\n self.watchcpu = watchcpu\n if watchcpu:\n self.wmiInterface = wmi.WMI()\n self.t = None\n self.savedpid = None\n self.cdb_command = cdb_command\n self.debugheap = debug_heap\n\n def kill(self, pid, returncode):\n \"\"\"kill function for Win32\"\"\"\n kernel32 = ctypes.windll.kernel32\n handle = kernel32.OpenProcess(1, 1, pid)\n ret = kernel32.TerminateProcess(handle, returncode)\n kernel32.CloseHandle(handle)\n return 0 != ret\n\n def debugger_app(self):\n \"\"\"\n Returns the name of the debugger application to use in this class\n \"\"\"\n typical = (\n 'C:\\\\Program Files\\\\Debugging Tools for Windows (x86)\\\\cdb.exe')\n if os.path.exists(typical):\n return typical\n return 'cdb'\n\n def debugger_test(self):\n \"\"\"\n Returns a command line (as list) that can be run via subprocess.call\n to confirm whether the debugger is on the path.\n \"\"\"\n return [self.debugger_app(), '-version']\n\n def _get_cmdline(self, outfile):\n cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command\n args = []\n args.append(self.debugger_app())\n args.append('-amsec.dll')\n if hasattr(self, 'debugheap') and self.debugheap:\n pass\n else:\n args.extend(('-hd', '-xd', 'gp'))\n args.extend(('-logo', outfile))\n args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))\n for self.exception_depth in xrange(0, self.exception_depth):\n cdb_command = 'g;' + cdb_command\n args.append(cdb_command)\n args.append(self.program)\n args.extend(self.cmd_args)\n for l in pformat(args).splitlines():\n logger.debug('dbg_args: 
%s', l)\n return args\n\n def _find_debug_target(self, exename, trycount=5):\n pid = None\n attempts = 0\n foundpid = False\n if self.watchcpu:\n while attempts < trycount and not foundpid:\n for process in self.wmiInterface.Win32_Process(name=exename):\n pid = process.ProcessID\n logger.debug('Found %s PID: %s', exename, pid)\n foundpid = True\n attempts += 1\n if not foundpid and attempts < trycount:\n logger.debug('%s not seen yet. Retrying...', exename)\n time.sleep(0.1)\n if not pid:\n logger.debug('Cannot find %s child process!', exename)\n return pid\n\n def run_with_timer(self):\n exename = os.path.basename(self.program)\n process_info = {}\n child_pid = None\n done = False\n started = False\n args = self._get_cmdline(self.outfile)\n p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.\n devnull, 'w'), universal_newlines=True)\n self.savedpid = p.pid\n child_pid = self._find_debug_target(exename, trycount=5)\n if child_pid is None and self.watchcpu:\n logger.debug('Bailing on debugger iteration')\n self.kill(self.savedpid, 99)\n return\n self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])\n self.t.start()\n if self.watchcpu:\n while p.poll() is None and not done and child_pid:\n for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(\n IDProcess=child_pid):\n n1, d1 = long(proc.PercentProcessorTime), long(proc.\n Timestamp_Sys100NS)\n n0, d0 = process_info.get(child_pid, (0, 0))\n try:\n percent_processor_time = float(n1 - n0) / float(d1 - d0\n ) * 100.0\n except ZeroDivisionError:\n percent_processor_time = 0.0\n process_info[child_pid] = n1, d1\n logger.debug('Process %s CPU usage: %s', child_pid,\n percent_processor_time)\n if percent_processor_time < 1e-10:\n if started:\n logger.debug(\n 'killing cdb session for %s due to CPU inactivity'\n , child_pid)\n done = True\n self.kill(self.savedpid, 99)\n else:\n started = True\n if not done:\n time.sleep(0.2)\n else:\n p.wait()\n self.t.cancel()\n\n def go(self):\n \"\"\"run 
cdb and process output\"\"\"\n if self.exception_depth > 0:\n self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self\n .exception_depth) + os.path.splitext(self.outfile)[1]\n self.run_with_timer()\n if not os.path.exists(self.outfile):\n open(self.outfile, 'w').close()\n parsed = MsecFile(self.outfile)\n for l in pformat(parsed.__dict__).splitlines():\n logger.debug('parsed: %s', l)\n return parsed\n\n def __exit__(self, etype, value, traceback):\n if self.t:\n logger.debug('Canceling timer...')\n self.t.cancel()\n",
"step-4": "<mask token>\nimport ctypes\nimport logging\nimport os\nfrom pprint import pformat\nfrom subprocess import Popen\nfrom threading import Timer\nimport time\nfrom certfuzz.debuggers.debugger_base import Debugger as DebuggerBase\nfrom certfuzz.debuggers.output_parsers.msec_file import MsecFile\nimport sys\nif sys.platform.startswith('win'):\n import wmi\nlogger = logging.getLogger(__name__)\n\n\ndef factory(options):\n return MsecDebugger(options)\n\n\nclass MsecDebugger(DebuggerBase):\n _platform = 'Windows'\n _key = 'msec'\n _ext = 'msec'\n\n def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu,\n exception_depth=0, cdb_command='!exploitable -v', debug_heap=False,\n **options):\n DebuggerBase.__init__(self, program, cmd_args, outfile_base,\n timeout, **options)\n self.exception_depth = exception_depth\n self.watchcpu = watchcpu\n if watchcpu:\n self.wmiInterface = wmi.WMI()\n self.t = None\n self.savedpid = None\n self.cdb_command = cdb_command\n self.debugheap = debug_heap\n\n def kill(self, pid, returncode):\n \"\"\"kill function for Win32\"\"\"\n kernel32 = ctypes.windll.kernel32\n handle = kernel32.OpenProcess(1, 1, pid)\n ret = kernel32.TerminateProcess(handle, returncode)\n kernel32.CloseHandle(handle)\n return 0 != ret\n\n def debugger_app(self):\n \"\"\"\n Returns the name of the debugger application to use in this class\n \"\"\"\n typical = (\n 'C:\\\\Program Files\\\\Debugging Tools for Windows (x86)\\\\cdb.exe')\n if os.path.exists(typical):\n return typical\n return 'cdb'\n\n def debugger_test(self):\n \"\"\"\n Returns a command line (as list) that can be run via subprocess.call\n to confirm whether the debugger is on the path.\n \"\"\"\n return [self.debugger_app(), '-version']\n\n def _get_cmdline(self, outfile):\n cdb_command = '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command\n args = []\n args.append(self.debugger_app())\n args.append('-amsec.dll')\n if hasattr(self, 'debugheap') and self.debugheap:\n pass\n else:\n 
args.extend(('-hd', '-xd', 'gp'))\n args.extend(('-logo', outfile))\n args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))\n for self.exception_depth in xrange(0, self.exception_depth):\n cdb_command = 'g;' + cdb_command\n args.append(cdb_command)\n args.append(self.program)\n args.extend(self.cmd_args)\n for l in pformat(args).splitlines():\n logger.debug('dbg_args: %s', l)\n return args\n\n def _find_debug_target(self, exename, trycount=5):\n pid = None\n attempts = 0\n foundpid = False\n if self.watchcpu:\n while attempts < trycount and not foundpid:\n for process in self.wmiInterface.Win32_Process(name=exename):\n pid = process.ProcessID\n logger.debug('Found %s PID: %s', exename, pid)\n foundpid = True\n attempts += 1\n if not foundpid and attempts < trycount:\n logger.debug('%s not seen yet. Retrying...', exename)\n time.sleep(0.1)\n if not pid:\n logger.debug('Cannot find %s child process!', exename)\n return pid\n\n def run_with_timer(self):\n exename = os.path.basename(self.program)\n process_info = {}\n child_pid = None\n done = False\n started = False\n args = self._get_cmdline(self.outfile)\n p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.\n devnull, 'w'), universal_newlines=True)\n self.savedpid = p.pid\n child_pid = self._find_debug_target(exename, trycount=5)\n if child_pid is None and self.watchcpu:\n logger.debug('Bailing on debugger iteration')\n self.kill(self.savedpid, 99)\n return\n self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])\n self.t.start()\n if self.watchcpu:\n while p.poll() is None and not done and child_pid:\n for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(\n IDProcess=child_pid):\n n1, d1 = long(proc.PercentProcessorTime), long(proc.\n Timestamp_Sys100NS)\n n0, d0 = process_info.get(child_pid, (0, 0))\n try:\n percent_processor_time = float(n1 - n0) / float(d1 - d0\n ) * 100.0\n except ZeroDivisionError:\n percent_processor_time = 0.0\n process_info[child_pid] = n1, d1\n 
logger.debug('Process %s CPU usage: %s', child_pid,\n percent_processor_time)\n if percent_processor_time < 1e-10:\n if started:\n logger.debug(\n 'killing cdb session for %s due to CPU inactivity'\n , child_pid)\n done = True\n self.kill(self.savedpid, 99)\n else:\n started = True\n if not done:\n time.sleep(0.2)\n else:\n p.wait()\n self.t.cancel()\n\n def go(self):\n \"\"\"run cdb and process output\"\"\"\n if self.exception_depth > 0:\n self.outfile = os.path.splitext(self.outfile)[0] + '.e' + str(self\n .exception_depth) + os.path.splitext(self.outfile)[1]\n self.run_with_timer()\n if not os.path.exists(self.outfile):\n open(self.outfile, 'w').close()\n parsed = MsecFile(self.outfile)\n for l in pformat(parsed.__dict__).splitlines():\n logger.debug('parsed: %s', l)\n return parsed\n\n def __exit__(self, etype, value, traceback):\n if self.t:\n logger.debug('Canceling timer...')\n self.t.cancel()\n",
"step-5": "\"\"\"This module runs cdb on a process and !exploitable on any exceptions.\r\n\"\"\"\r\nimport ctypes\r\nimport logging\r\nimport os\r\nfrom pprint import pformat\r\nfrom subprocess import Popen\r\nfrom threading import Timer\r\nimport time\r\n\r\nfrom certfuzz.debuggers.debugger_base import Debugger as DebuggerBase\r\nfrom certfuzz.debuggers.output_parsers.msec_file import MsecFile\r\n\r\nimport sys\r\n\r\nif sys.platform.startswith('win'):\r\n import wmi\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef factory(options):\r\n return MsecDebugger(options)\r\n\r\n\r\nclass MsecDebugger(DebuggerBase):\r\n _platform = 'Windows'\r\n _key = 'msec'\r\n _ext = 'msec'\r\n\r\n def __init__(self, program, cmd_args, outfile_base, timeout, watchcpu, exception_depth=0, cdb_command='!exploitable -v', debug_heap=False, ** options):\r\n DebuggerBase.__init__(\r\n self, program, cmd_args, outfile_base, timeout, **options)\r\n self.exception_depth = exception_depth\r\n self.watchcpu = watchcpu\r\n if watchcpu:\r\n self.wmiInterface = wmi.WMI()\r\n self.t = None\r\n self.savedpid = None\r\n self.cdb_command = cdb_command\r\n self.debugheap = debug_heap\r\n\r\n def kill(self, pid, returncode):\r\n \"\"\"kill function for Win32\"\"\"\r\n kernel32 = ctypes.windll.kernel32\r\n handle = kernel32.OpenProcess(1, 1, pid)\r\n ret = kernel32.TerminateProcess(handle, returncode)\r\n kernel32.CloseHandle(handle)\r\n return (0 != ret)\r\n\r\n def debugger_app(self):\r\n '''\r\n Returns the name of the debugger application to use in this class\r\n '''\r\n typical = \"C:\\\\Program Files\\\\Debugging Tools for Windows (x86)\\\\cdb.exe\"\r\n if os.path.exists(typical):\r\n return typical\r\n return 'cdb'\r\n\r\n def debugger_test(self):\r\n '''\r\n Returns a command line (as list) that can be run via subprocess.call\r\n to confirm whether the debugger is on the path.\r\n '''\r\n return [self.debugger_app(), '-version']\r\n\r\n def _get_cmdline(self, outfile):\r\n cdb_command 
= '$$Found_with_CERT_BFF_2.8;r;%s;q' % self.cdb_command\r\n args = []\r\n args.append(self.debugger_app())\r\n args.append('-amsec.dll')\r\n if hasattr(self, 'debugheap') and self.debugheap:\r\n # do not use hd, xd options if debugheap is set\r\n pass\r\n else:\r\n args.extend(('-hd', '-xd', 'gp'))\r\n args.extend(('-logo', outfile))\r\n args.extend(('-xd', 'bpe', '-xd', 'wob', '-o', '-G', '-c'))\r\n for self.exception_depth in xrange(0, self.exception_depth):\r\n cdb_command = 'g;' + cdb_command\r\n args.append(cdb_command)\r\n args.append(self.program)\r\n args.extend(self.cmd_args)\r\n for l in pformat(args).splitlines():\r\n logger.debug('dbg_args: %s', l)\r\n return args\r\n\r\n def _find_debug_target(self, exename, trycount=5):\r\n pid = None\r\n attempts = 0\r\n foundpid = False\r\n\r\n if self.watchcpu:\r\n\r\n while attempts < trycount and not foundpid:\r\n for process in self.wmiInterface.Win32_Process(name=exename):\r\n # TODO: What if there's more than one?\r\n pid = process.ProcessID\r\n logger.debug('Found %s PID: %s', exename, pid)\r\n foundpid = True\r\n\r\n attempts += 1\r\n if not foundpid and attempts < trycount:\r\n logger.debug('%s not seen yet. 
Retrying...', exename)\r\n time.sleep(0.1)\r\n\r\n if not pid:\r\n logger.debug('Cannot find %s child process!', exename)\r\n return pid\r\n\r\n def run_with_timer(self):\r\n # TODO: replace this with subp.run_with_timer()\r\n exename = os.path.basename(self.program)\r\n process_info = {}\r\n child_pid = None\r\n done = False\r\n started = False\r\n\r\n args = self._get_cmdline(self.outfile)\r\n p = Popen(args, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'),\r\n universal_newlines=True)\r\n self.savedpid = p.pid\r\n\r\n child_pid = self._find_debug_target(exename, trycount=5)\r\n if child_pid is None and self.watchcpu:\r\n logger.debug('Bailing on debugger iteration')\r\n self.kill(self.savedpid, 99)\r\n return\r\n\r\n # create a timer that calls kill() when it expires\r\n self.t = Timer(self.timeout, self.kill, args=[self.savedpid, 99])\r\n self.t.start()\r\n if self.watchcpu:\r\n # This is a race. In some cases, a GUI app could be done before we can even measure it\r\n # TODO: Do something about it\r\n while p.poll() is None and not done and child_pid:\r\n for proc in self.wmiInterface.Win32_PerfRawData_PerfProc_Process(IDProcess=child_pid):\r\n n1, d1 = long(proc.PercentProcessorTime), long(\r\n proc.Timestamp_Sys100NS)\r\n n0, d0 = process_info.get(child_pid, (0, 0))\r\n try:\r\n percent_processor_time = (\r\n float(n1 - n0) / float(d1 - d0)) * 100.0\r\n except ZeroDivisionError:\r\n percent_processor_time = 0.0\r\n process_info[child_pid] = (n1, d1)\r\n logger.debug(\r\n 'Process %s CPU usage: %s', child_pid, percent_processor_time)\r\n if percent_processor_time < 0.0000000001:\r\n if started:\r\n logger.debug(\r\n 'killing cdb session for %s due to CPU inactivity', child_pid)\r\n done = True\r\n self.kill(self.savedpid, 99)\r\n else:\r\n # Detected CPU usage. 
Now look for it to drop near zero\r\n started = True\r\n\r\n if not done:\r\n time.sleep(0.2)\r\n else:\r\n p.wait()\r\n self.t.cancel()\r\n\r\n def go(self):\r\n \"\"\"run cdb and process output\"\"\"\r\n # For exceptions beyond the first one, put the handled exception number\r\n # in the name\r\n if self.exception_depth > 0:\r\n self.outfile = os.path.splitext(self.outfile)[\r\n 0] + '.e' + str(self.exception_depth) + os.path.splitext(self.outfile)[1]\r\n self.run_with_timer()\r\n if not os.path.exists(self.outfile):\r\n # touch it if it doesn't exist\r\n open(self.outfile, 'w').close()\r\n\r\n parsed = MsecFile(self.outfile)\r\n\r\n for l in pformat(parsed.__dict__).splitlines():\r\n logger.debug('parsed: %s', l)\r\n return parsed\r\n\r\n def __exit__(self, etype, value, traceback):\r\n if self.t:\r\n logger.debug('Canceling timer...')\r\n self.t.cancel()\r\n\r\n# END MsecDebugger\r\n",
"step-ids": [
8,
9,
12,
15,
16
]
}
|
[
8,
9,
12,
15,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def display(request, id):
    """Render the detail page for the Job whose primary key is *id*."""
    # NOTE(review): `id` shadows the builtin but is kept for URLconf
    # compatibility; Job.objects.get raises DoesNotExist for unknown ids.
    context = {'job': Job.objects.get(id=int(id))}
    return render(request, 'handy_helper_exam/display.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render, HttpResponse, redirect
from ..login.models import *
from ..dashboard.models import *
def display(request, id):
    """Show the detail template for a single Job selected by primary key."""
    job = Job.objects.get(id=int(id))
    return render(request, 'handy_helper_exam/display.html', {'job': job})
|
flexible
|
{
"blob_id": "f1fdba1c07a29aa22ee8d0dcbd6f902aa2e8b4c2",
"index": 9342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef display(request, id):\n context = {'job': Job.objects.get(id=int(id))}\n return render(request, 'handy_helper_exam/display.html', context)\n",
"step-3": "from django.shortcuts import render, HttpResponse, redirect\nfrom ..login.models import *\nfrom ..dashboard.models import *\n\n\ndef display(request, id):\n context = {'job': Job.objects.get(id=int(id))}\n return render(request, 'handy_helper_exam/display.html', context)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import tempfile
import unittest
from unittest.mock import mock_open, patch, MagicMock, call
import compare_apple_music_and_spotify as music_compare
class get_apple_music_data(unittest.TestCase):
    """Tests for AppleMusicDataParser.

    The parser reads an Apple Music library XML export line by line and
    collects ``{'Artist': ..., 'Song': ...}`` dicts; ``builtins.open`` is
    mocked throughout so no real file is touched.
    """

    def test_open_file(self):
        # create() should open exactly the path it was handed.
        with patch("builtins.open", mock_open(read_data="data")) as mock_file:
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            assert open("/apple_music").read() == "data"
            mock_file.assert_called_with("/apple_music")

    def test_save_one_artist_from_line(self):
        # A "Sort Artist" line should populate the pending Artist field.
        with patch("builtins.open", mock_open(read_data="""<key>Sort Artist</key><string>Drew Goddard</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual("Drew Goddard", apple_music_data_parser.one_song_and_artist.get('Artist'))

    def test_save_one_song(self):
        # A "Sort Name" line should populate the pending Song field.
        with patch("builtins.open",
                   mock_open(read_data="""<key>Sort Name</key><string>The Cabin In the Woods</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual("The Cabin In the Woods", apple_music_data_parser.one_song_and_artist.get('Song'))

    def test_save_one_song_and_artist(self):
        # An artist line plus a song line become one combined entry.
        with patch("builtins.open", mock_open(read_data="""<key>Sort Artist</key><string>Drew Goddard</string>
        <key>Sort Name</key><string>The Cabin In the Woods</string>""")):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual([{'Artist': "Drew Goddard", 'Song': "The Cabin In the Woods"}],
                             apple_music_data_parser.all_songs_and_artists)

    def test_save_several_songs_and_artists(self):
        # Multiple song/artist pairs are collected in file order.
        with patch("builtins.open", mock_open(read_data='''<key>Sort Name</key><string>The Cabin In the Woods</string>
        <key>Sort Artist</key><string>Drew Goddard</string>
        <key>Sort Name</key><string>Pulp Fiction</string>
        <key>Sort Artist</key><string>Quentin Tarantino</string>''')):
            apple_music_data_parser = music_compare.AppleMusicDataParser()
            apple_music_data_parser.create("/apple_music")
            self.assertEqual([{'Artist': "Drew Goddard", 'Song': "The Cabin In the Woods"},
                              {'Artist': "Quentin Tarantino", 'Song': "Pulp Fiction"}],
                             apple_music_data_parser.all_songs_and_artists)
class spotify_data_parser(unittest.TestCase):
    """Tests for spotify_data_parser.

    The parser reads a Spotify playlist CSV export and collects
    ``{'Artist': ..., 'Song': ...}`` dicts; ``builtins.open`` is mocked
    where file access is needed.
    """

    def test_open_file_and_return_formated_data_split_by_coma(self):
        # read_file() should open the CSV for reading (newline='' per the
        # csv-module convention) and hand back a DictReader-like object.
        with patch("builtins.open", mock_open(read_data="split,by,")):
            result = music_compare.spotify_data_parser().read_file("/test_path")
            open.assert_called_once_with("/test_path", "r", newline='')
            self.assertTrue(result, "_csv.DictReader")

    def test_no_artist_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        result = music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)
        self.assertEqual(False, result)

    def test_artist_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "Artist Name": "Avenged Sevenfold",
        }
        result = music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)
        self.assertEqual(True, result)

    def test_song_not_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "not found": "Nightmare",
        }
        result = music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)
        self.assertEqual(False, result)

    def test_song_found_on_line(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "Nightmare",
        }
        result = music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)
        self.assertEqual(True, result)

    def test_dont_save_if_artist_not_found(self):
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        # BUG FIX: the previous version called save_artist() on one instance
        # but asserted on a brand-new second instance, whose dict is trivially
        # empty -- the test passed even if save_artist() had saved data.
        parser = music_compare.spotify_data_parser()
        parser.save_artist(lines_csv_dict_reader_formated)
        self.assertEqual({}, parser.one_song_and_artist)

    def test_save_if_artist_found(self):
        lines_csv_dict_reader_formated = {
            "Artist Name": "test_artist",
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)
        self.assertEqual('test_artist', self.spotify_data_parser.one_song_and_artist.get('Artist'))

    def test_dont_save_if_song_not_found(self):
        lines_csv_dict_reader_formated = {
            "not found": "not important",
        }
        # BUG FIX: same as the artist case above -- assert on the instance
        # that save_song() was actually called on.
        parser = music_compare.spotify_data_parser()
        parser.save_song(lines_csv_dict_reader_formated)
        self.assertEqual({}, parser.one_song_and_artist)

    def test_save_if_song_found(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "test_song",
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.assertEqual('test_song', self.spotify_data_parser.one_song_and_artist.get('Song'))

    def test_combine_song_found_and_NOT_artist(self):
        # Keys deliberately do NOT match the expected CSV headers, so nothing
        # is saved and combine produces no entry.
        lines_csv_dict_reader_formated = {
            "Name": "test_song",
            "Artist": "test_artist"
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.spotify_data_parser.combine_song_and_artist()
        self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)

    def test_combine_song_and_artist_if_found(self):
        lines_csv_dict_reader_formated = {
            "Track Name": "test_song",
            "Artist Name": "test_artist"
        }
        self.spotify_data_parser = music_compare.spotify_data_parser()
        self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)
        self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)
        self.spotify_data_parser.combine_song_and_artist()
        self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],
                         self.spotify_data_parser.all_songs_and_artists)

    def test_combine_several_songs_and_artists(self):
        # End-to-end: a two-row CSV export yields two combined entries
        # in file order.
        with patch("builtins.open", mock_open(read_data='''Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At
"spotify:track:4UEo1b0wWrtHMC8bVqPiH8","Nightmare","Avenged Sevenfold","Nightmare","1","1","374453","spotify:user:","2010-10-17T20:18:40Z"
"spotify:track:1d5UuboIPRMD4HaU3yycKC","Somewhere I Belong","Linkin Park","Meteora (Bonus Edition)","1","3","213933","spotify:user:","2010-10-17T20:24:25Z"''')):
            self.spotify_data_parser = music_compare.spotify_data_parser()
            self.spotify_data_parser.create("/test_path")
            self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song': 'Nightmare'},
                              {'Artist': 'Linkin Park', 'Song': 'Somewhere I Belong'}],
                             self.spotify_data_parser.all_songs_and_artists)
class apple_music_and_spotify_comparer(unittest.TestCase):
    """Tests for music_compare.apple_music_and_spotify_comparer.

    Both parsers' `create` entry points are patched so no real files are
    read; the comparer's console report is inspected via a patched
    builtins.print and ordered `assert_has_calls` expectations.

    NOTE: decorators apply bottom-up, so the mock closest to each method
    (AppleMusicDataParser.create) binds to the first mock parameter
    (`apple_music`) and the outer one (spotify_data_parser.create) to the
    second (`spotify`).
    """
    def setUp(self):
        # Fresh comparer for every test; the per-method patches are active
        # only while the test method itself runs.
        self.comparer = music_compare.apple_music_and_spotify_comparer()
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_save_data_from_spotify_and_apple_music_in_class(self, apple_music, spotify):
        """save_data_locally stores each parser's result on the instance."""
        test = music_compare.apple_music_and_spotify_comparer()
        spotify.return_value = [{'Artist': 'test_artist1', 'Song': 'test_song1'}]
        apple_music.return_value = [{'Artist': 'test_artist2', 'Song': 'test_song2'}]
        test.save_data_locally("/spotify", "/apple_music")
        self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}], test.spotify_lib)
        self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}], test.apple_music_lib)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_song_and_artist_when_song_not_found_in_apple_music(self, apple_music, spotify):
        """A song only in the Spotify library is reported as missing from Apple Music."""
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            mock_print.assert_has_calls(
                [call('following songs not found in apple_music:'),
                 call('test_song_no_match by artist test_artist_no_match')])
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_song_and_artist_when_song_not_found_in_spotify(self, apple_music, spotify):
        """A song only in the Apple Music library is reported as missing from Spotify."""
        spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            # Trailing call() matches a blank-line print after the section.
            mock_print.assert_has_calls([call('following songs not found in spotify:'),
                                         call('test_song by artist test_artist'),
                                         call()])
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(self, apple_music, spotify):
        """Every non-matching Spotify song is printed, in order, under one header."""
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},
                                {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},
                                {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            # Exactly three prints: the header plus one line per missing song.
            self.assertEqual(3, mock_print.call_count)
            mock_print.assert_has_calls(
                [call('following songs not found in apple_music:'),
                 call('test_song_no_match by artist test_artist_no_match'),
                 call('test_song_no_match2 by artist test_artist_no_match2')],
                any_order=False)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_song_not_found_in_spotify(self, apple_music, spotify):
        """Every non-matching Apple Music song is printed, followed by a blank line."""
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},
                                    {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},
                                    {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist2', 'Song': 'test_song2'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            # Header + two missing songs + trailing blank-line print.
            self.assertEqual(4, mock_print.call_count)
            mock_print.assert_has_calls(
                [call('following songs not found in spotify:'),
                 call('test_song_no_match by artist test_artist_no_match'),
                 call('test_song_no_match2 by artist test_artist_no_match2'),
                 call()],
                any_order=False)
    @patch.object(music_compare.spotify_data_parser, 'create')
    @patch.object(music_compare.AppleMusicDataParser, 'create')
    def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(self, apple_music,
                                                                                                   spotify):
        """With gaps on both sides, the spotify section prints first, then apple_music."""
        apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                    {'Artist': 'test_artist_only_apple_music', 'Song': 'test_song_only_apple_music'}]
        spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},
                                {'Artist': 'test_artist_only_spotify', 'Song': 'test_song_only_spotify'}]
        with patch("builtins.print") as mock_print:
            self.comparer.find_matches("/spotify", "/apple_music")
            # Two headers + one song each + the blank line between sections.
            self.assertEqual(5, mock_print.call_count)
            mock_print.assert_has_calls([call("following songs not found in spotify:"),
                                         call('test_song_only_apple_music by artist test_artist_only_apple_music'),
                                         call(),
                                         call("following songs not found in apple_music:"),
                                         call('test_song_only_spotify by artist test_artist_only_spotify')
                                         ])
|
normal
|
{
"blob_id": "eec08b3fdd4beb7d88ac0dc6d2e8776cf54fda35",
"index": 2727,
"step-1": "<mask token>\n\n\nclass spotify_data_parser(unittest.TestCase):\n\n def test_open_file_and_return_formated_data_split_by_coma(self):\n with patch('builtins.open', mock_open(read_data='split,by,')):\n result = music_compare.spotify_data_parser().read_file('/test_path'\n )\n open.assert_called_once_with('/test_path', 'r', newline='')\n self.assertTrue(result, '_csv.DictReader')\n\n def test_no_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'Avenged Sevenfold'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_song_not_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_song_found_on_line(self):\n lines_csv_dict_reader_formated = {'Track Name': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_dont_save_if_artist_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_artist_found(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.assertEqual('test_artist', self.spotify_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_dont_save_if_song_not_found(self):\n 
lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_song(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n <mask token>\n\n def test_combine_song_found_and_NOT_artist(self):\n lines_csv_dict_reader_formated = {'Name': 'test_song', 'Artist':\n 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_song_and_artist_if_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song',\n 'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],\n self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At\n\"spotify:track:4UEo1b0wWrtHMC8bVqPiH8\",\"Nightmare\",\"Avenged Sevenfold\",\"Nightmare\",\"1\",\"1\",\"374453\",\"spotify:user:\",\"2010-10-17T20:18:40Z\"\n\"spotify:track:1d5UuboIPRMD4HaU3yycKC\",\"Somewhere I Belong\",\"Linkin Park\",\"Meteora (Bonus Edition)\",\"1\",\"3\",\"213933\",\"spotify:user:\",\"2010-10-17T20:24:25Z\\\"\"\"\"\n )):\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.create('/test_path')\n self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song':\n 'Nightmare'}, {'Artist': 'Linkin Park', 'Song':\n 'Somewhere I Belong'}], 
self.spotify_data_parser.\n all_songs_and_artists)\n\n\nclass apple_music_and_spotify_comparer(unittest.TestCase):\n\n def setUp(self):\n self.comparer = music_compare.apple_music_and_spotify_comparer()\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_save_data_from_spotify_and_apple_music_in_class(self,\n apple_music, spotify):\n test = music_compare.apple_music_and_spotify_comparer()\n spotify.return_value = [{'Artist': 'test_artist1', 'Song':\n 'test_song1'}]\n apple_music.return_value = [{'Artist': 'test_artist2', 'Song':\n 'test_song2'}]\n test.save_data_locally('/spotify', '/apple_music')\n self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}],\n test.spotify_lib)\n self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}],\n test.apple_music_lib)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_apple_music(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match')])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_spotify(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 
'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song by artist test_artist'), call()])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(\n self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(3, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2')],\n any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_spotify(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n 
self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(4, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2'),\n call()], any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_apple_music', 'Song':\n 'test_song_only_apple_music'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_spotify', 'Song':\n 'test_song_only_spotify'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(5, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_only_apple_music by artist test_artist_only_apple_music'\n ), call(), call('following songs not found in apple_music:'\n ), call(\n 'test_song_only_spotify by artist test_artist_only_spotify')])\n",
"step-2": "<mask token>\n\n\nclass get_apple_music_data(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_save_one_song(self):\n with patch('builtins.open', mock_open(read_data=\n '<key>Sort Name</key><string>The Cabin In the Woods</string>')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual('The Cabin In the Woods',\n apple_music_data_parser.one_song_and_artist.get('Song'))\n <mask token>\n <mask token>\n\n\nclass spotify_data_parser(unittest.TestCase):\n\n def test_open_file_and_return_formated_data_split_by_coma(self):\n with patch('builtins.open', mock_open(read_data='split,by,')):\n result = music_compare.spotify_data_parser().read_file('/test_path'\n )\n open.assert_called_once_with('/test_path', 'r', newline='')\n self.assertTrue(result, '_csv.DictReader')\n\n def test_no_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'Avenged Sevenfold'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_song_not_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_song_found_on_line(self):\n lines_csv_dict_reader_formated = {'Track Name': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_dont_save_if_artist_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_artist(\n 
lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_artist_found(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.assertEqual('test_artist', self.spotify_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_dont_save_if_song_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_song(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_song_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.assertEqual('test_song', self.spotify_data_parser.\n one_song_and_artist.get('Song'))\n\n def test_combine_song_found_and_NOT_artist(self):\n lines_csv_dict_reader_formated = {'Name': 'test_song', 'Artist':\n 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_song_and_artist_if_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song',\n 'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],\n self.spotify_data_parser.all_songs_and_artists)\n\n def 
test_combine_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At\n\"spotify:track:4UEo1b0wWrtHMC8bVqPiH8\",\"Nightmare\",\"Avenged Sevenfold\",\"Nightmare\",\"1\",\"1\",\"374453\",\"spotify:user:\",\"2010-10-17T20:18:40Z\"\n\"spotify:track:1d5UuboIPRMD4HaU3yycKC\",\"Somewhere I Belong\",\"Linkin Park\",\"Meteora (Bonus Edition)\",\"1\",\"3\",\"213933\",\"spotify:user:\",\"2010-10-17T20:24:25Z\\\"\"\"\"\n )):\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.create('/test_path')\n self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song':\n 'Nightmare'}, {'Artist': 'Linkin Park', 'Song':\n 'Somewhere I Belong'}], self.spotify_data_parser.\n all_songs_and_artists)\n\n\nclass apple_music_and_spotify_comparer(unittest.TestCase):\n\n def setUp(self):\n self.comparer = music_compare.apple_music_and_spotify_comparer()\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_save_data_from_spotify_and_apple_music_in_class(self,\n apple_music, spotify):\n test = music_compare.apple_music_and_spotify_comparer()\n spotify.return_value = [{'Artist': 'test_artist1', 'Song':\n 'test_song1'}]\n apple_music.return_value = [{'Artist': 'test_artist2', 'Song':\n 'test_song2'}]\n test.save_data_locally('/spotify', '/apple_music')\n self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}],\n test.spotify_lib)\n self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}],\n test.apple_music_lib)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_apple_music(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 
'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match')])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_spotify(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song by artist test_artist'), call()])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(\n self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(3, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist 
test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2')],\n any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_spotify(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(4, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2'),\n call()], any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_apple_music', 'Song':\n 'test_song_only_apple_music'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_spotify', 'Song':\n 'test_song_only_spotify'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(5, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_only_apple_music by artist test_artist_only_apple_music'\n ), 
call(), call('following songs not found in apple_music:'\n ), call(\n 'test_song_only_spotify by artist test_artist_only_spotify')])\n",
"step-3": "<mask token>\n\n\nclass get_apple_music_data(unittest.TestCase):\n\n def test_open_file(self):\n with patch('builtins.open', mock_open(read_data='data')) as mock_file:\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n assert open('/apple_music').read() == 'data'\n mock_file.assert_called_with('/apple_music')\n\n def test_save_one_artist_from_line(self):\n with patch('builtins.open', mock_open(read_data=\n '<key>Sort Artist</key><string>Drew Goddard</string>')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual('Drew Goddard', apple_music_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_save_one_song(self):\n with patch('builtins.open', mock_open(read_data=\n '<key>Sort Name</key><string>The Cabin In the Woods</string>')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual('The Cabin In the Woods',\n apple_music_data_parser.one_song_and_artist.get('Song'))\n\n def test_save_one_song_and_artist(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"<key>Sort Artist</key><string>Drew Goddard</string>\n <key>Sort Name</key><string>The Cabin In the Woods</string>\"\"\"\n )):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual([{'Artist': 'Drew Goddard', 'Song':\n 'The Cabin In the Woods'}], apple_music_data_parser.\n all_songs_and_artists)\n\n def test_save_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"<key>Sort Name</key><string>The Cabin In the Woods</string>\n <key>Sort Artist</key><string>Drew Goddard</string>\n <key>Sort Name</key><string>Pulp Fiction</string>\n\t<key>Sort Artist</key><string>Quentin Tarantino</string>\"\"\"\n )):\n apple_music_data_parser = 
music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual([{'Artist': 'Drew Goddard', 'Song':\n 'The Cabin In the Woods'}, {'Artist': 'Quentin Tarantino',\n 'Song': 'Pulp Fiction'}], apple_music_data_parser.\n all_songs_and_artists)\n\n\nclass spotify_data_parser(unittest.TestCase):\n\n def test_open_file_and_return_formated_data_split_by_coma(self):\n with patch('builtins.open', mock_open(read_data='split,by,')):\n result = music_compare.spotify_data_parser().read_file('/test_path'\n )\n open.assert_called_once_with('/test_path', 'r', newline='')\n self.assertTrue(result, '_csv.DictReader')\n\n def test_no_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'Avenged Sevenfold'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_song_not_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_song_found_on_line(self):\n lines_csv_dict_reader_formated = {'Track Name': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_dont_save_if_artist_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_artist_found(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'test_artist'}\n self.spotify_data_parser = 
music_compare.spotify_data_parser()\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.assertEqual('test_artist', self.spotify_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_dont_save_if_song_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_song(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_song_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.assertEqual('test_song', self.spotify_data_parser.\n one_song_and_artist.get('Song'))\n\n def test_combine_song_found_and_NOT_artist(self):\n lines_csv_dict_reader_formated = {'Name': 'test_song', 'Artist':\n 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_song_and_artist_if_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song',\n 'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],\n self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added 
At\n\"spotify:track:4UEo1b0wWrtHMC8bVqPiH8\",\"Nightmare\",\"Avenged Sevenfold\",\"Nightmare\",\"1\",\"1\",\"374453\",\"spotify:user:\",\"2010-10-17T20:18:40Z\"\n\"spotify:track:1d5UuboIPRMD4HaU3yycKC\",\"Somewhere I Belong\",\"Linkin Park\",\"Meteora (Bonus Edition)\",\"1\",\"3\",\"213933\",\"spotify:user:\",\"2010-10-17T20:24:25Z\\\"\"\"\"\n )):\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.create('/test_path')\n self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song':\n 'Nightmare'}, {'Artist': 'Linkin Park', 'Song':\n 'Somewhere I Belong'}], self.spotify_data_parser.\n all_songs_and_artists)\n\n\nclass apple_music_and_spotify_comparer(unittest.TestCase):\n\n def setUp(self):\n self.comparer = music_compare.apple_music_and_spotify_comparer()\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_save_data_from_spotify_and_apple_music_in_class(self,\n apple_music, spotify):\n test = music_compare.apple_music_and_spotify_comparer()\n spotify.return_value = [{'Artist': 'test_artist1', 'Song':\n 'test_song1'}]\n apple_music.return_value = [{'Artist': 'test_artist2', 'Song':\n 'test_song2'}]\n test.save_data_locally('/spotify', '/apple_music')\n self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}],\n test.spotify_lib)\n self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}],\n test.apple_music_lib)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_apple_music(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}]\n with patch('builtins.print') as mock_print:\n 
self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match')])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_spotify(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song by artist test_artist'), call()])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(\n self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(3, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2')],\n any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n 
@patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_spotify(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(4, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2'),\n call()], any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_apple_music', 'Song':\n 'test_song_only_apple_music'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_spotify', 'Song':\n 'test_song_only_spotify'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(5, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_only_apple_music by artist test_artist_only_apple_music'\n ), call(), call('following songs not found in apple_music:'\n ), call(\n 'test_song_only_spotify by artist test_artist_only_spotify')])\n",
"step-4": "import tempfile\nimport unittest\nfrom unittest.mock import mock_open, patch, MagicMock, call\nimport compare_apple_music_and_spotify as music_compare\n\n\nclass get_apple_music_data(unittest.TestCase):\n\n def test_open_file(self):\n with patch('builtins.open', mock_open(read_data='data')) as mock_file:\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n assert open('/apple_music').read() == 'data'\n mock_file.assert_called_with('/apple_music')\n\n def test_save_one_artist_from_line(self):\n with patch('builtins.open', mock_open(read_data=\n '<key>Sort Artist</key><string>Drew Goddard</string>')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual('Drew Goddard', apple_music_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_save_one_song(self):\n with patch('builtins.open', mock_open(read_data=\n '<key>Sort Name</key><string>The Cabin In the Woods</string>')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual('The Cabin In the Woods',\n apple_music_data_parser.one_song_and_artist.get('Song'))\n\n def test_save_one_song_and_artist(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"<key>Sort Artist</key><string>Drew Goddard</string>\n <key>Sort Name</key><string>The Cabin In the Woods</string>\"\"\"\n )):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual([{'Artist': 'Drew Goddard', 'Song':\n 'The Cabin In the Woods'}], apple_music_data_parser.\n all_songs_and_artists)\n\n def test_save_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"<key>Sort Name</key><string>The Cabin In the Woods</string>\n <key>Sort Artist</key><string>Drew Goddard</string>\n <key>Sort Name</key><string>Pulp 
Fiction</string>\n\t<key>Sort Artist</key><string>Quentin Tarantino</string>\"\"\"\n )):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create('/apple_music')\n self.assertEqual([{'Artist': 'Drew Goddard', 'Song':\n 'The Cabin In the Woods'}, {'Artist': 'Quentin Tarantino',\n 'Song': 'Pulp Fiction'}], apple_music_data_parser.\n all_songs_and_artists)\n\n\nclass spotify_data_parser(unittest.TestCase):\n\n def test_open_file_and_return_formated_data_split_by_coma(self):\n with patch('builtins.open', mock_open(read_data='split,by,')):\n result = music_compare.spotify_data_parser().read_file('/test_path'\n )\n open.assert_called_once_with('/test_path', 'r', newline='')\n self.assertTrue(result, '_csv.DictReader')\n\n def test_no_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'Avenged Sevenfold'}\n result = music_compare.spotify_data_parser().is_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_song_not_found_on_line(self):\n lines_csv_dict_reader_formated = {'not found': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(False, result)\n\n def test_song_found_on_line(self):\n lines_csv_dict_reader_formated = {'Track Name': 'Nightmare'}\n result = music_compare.spotify_data_parser().is_song(\n lines_csv_dict_reader_formated)\n self.assertEqual(True, result)\n\n def test_dont_save_if_artist_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_artist(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def 
test_save_if_artist_found(self):\n lines_csv_dict_reader_formated = {'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.assertEqual('test_artist', self.spotify_data_parser.\n one_song_and_artist.get('Artist'))\n\n def test_dont_save_if_song_not_found(self):\n lines_csv_dict_reader_formated = {'not found': 'not important'}\n music_compare.spotify_data_parser().save_song(\n lines_csv_dict_reader_formated)\n self.assertEqual({}, music_compare.spotify_data_parser().\n one_song_and_artist)\n\n def test_save_if_song_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.assertEqual('test_song', self.spotify_data_parser.\n one_song_and_artist.get('Song'))\n\n def test_combine_song_found_and_NOT_artist(self):\n lines_csv_dict_reader_formated = {'Name': 'test_song', 'Artist':\n 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_song_and_artist_if_found(self):\n lines_csv_dict_reader_formated = {'Track Name': 'test_song',\n 'Artist Name': 'test_artist'}\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],\n self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_several_songs_and_artists(self):\n with patch('builtins.open', mock_open(read_data=\n \"\"\"Spotify URI,Track 
Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At\n\"spotify:track:4UEo1b0wWrtHMC8bVqPiH8\",\"Nightmare\",\"Avenged Sevenfold\",\"Nightmare\",\"1\",\"1\",\"374453\",\"spotify:user:\",\"2010-10-17T20:18:40Z\"\n\"spotify:track:1d5UuboIPRMD4HaU3yycKC\",\"Somewhere I Belong\",\"Linkin Park\",\"Meteora (Bonus Edition)\",\"1\",\"3\",\"213933\",\"spotify:user:\",\"2010-10-17T20:24:25Z\\\"\"\"\"\n )):\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.create('/test_path')\n self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song':\n 'Nightmare'}, {'Artist': 'Linkin Park', 'Song':\n 'Somewhere I Belong'}], self.spotify_data_parser.\n all_songs_and_artists)\n\n\nclass apple_music_and_spotify_comparer(unittest.TestCase):\n\n def setUp(self):\n self.comparer = music_compare.apple_music_and_spotify_comparer()\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_save_data_from_spotify_and_apple_music_in_class(self,\n apple_music, spotify):\n test = music_compare.apple_music_and_spotify_comparer()\n spotify.return_value = [{'Artist': 'test_artist1', 'Song':\n 'test_song1'}]\n apple_music.return_value = [{'Artist': 'test_artist2', 'Song':\n 'test_song2'}]\n test.save_data_locally('/spotify', '/apple_music')\n self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}],\n test.spotify_lib)\n self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}],\n test.apple_music_lib)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_apple_music(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 
'test_song'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match')])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_spotify(self,\n apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song by artist test_artist'), call()])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(\n self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(3, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in apple_music:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2')],\n any_order=False)\n\n 
@patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_spotify(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_no_match', 'Song':\n 'test_song_no_match'}, {'Artist': 'test_artist_no_match2',\n 'Song': 'test_song_no_match2'}, {'Artist': 'test_artist2',\n 'Song': 'test_song2'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(4, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_no_match by artist test_artist_no_match'), call(\n 'test_song_no_match2 by artist test_artist_no_match2'),\n call()], any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(\n self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_apple_music', 'Song':\n 'test_song_only_apple_music'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song':\n 'test_song'}, {'Artist': 'test_artist_only_spotify', 'Song':\n 'test_song_only_spotify'}]\n with patch('builtins.print') as mock_print:\n self.comparer.find_matches('/spotify', '/apple_music')\n self.assertEqual(5, mock_print.call_count)\n mock_print.assert_has_calls([call(\n 'following songs not found in spotify:'), call(\n 'test_song_only_apple_music by artist test_artist_only_apple_music'\n ), call(), call('following songs not found in apple_music:'\n ), call(\n 'test_song_only_spotify by artist 
test_artist_only_spotify')])\n",
"step-5": "import tempfile\nimport unittest\n\nfrom unittest.mock import mock_open, patch, MagicMock, call\nimport compare_apple_music_and_spotify as music_compare\n\n\nclass get_apple_music_data(unittest.TestCase):\n def test_open_file(self):\n with patch(\"builtins.open\", mock_open(read_data=\"data\")) as mock_file:\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create(\"/apple_music\")\n assert open(\"/apple_music\").read() == \"data\"\n mock_file.assert_called_with(\"/apple_music\")\n\n def test_save_one_artist_from_line(self):\n with patch(\"builtins.open\", mock_open(read_data=\"\"\"<key>Sort Artist</key><string>Drew Goddard</string>\"\"\")):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create(\"/apple_music\")\n self.assertEqual(\"Drew Goddard\", apple_music_data_parser.one_song_and_artist.get('Artist'))\n\n def test_save_one_song(self):\n with patch(\"builtins.open\",\n mock_open(read_data=\"\"\"<key>Sort Name</key><string>The Cabin In the Woods</string>\"\"\")):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create(\"/apple_music\")\n self.assertEqual(\"The Cabin In the Woods\", apple_music_data_parser.one_song_and_artist.get('Song'))\n\n def test_save_one_song_and_artist(self):\n with patch(\"builtins.open\", mock_open(read_data=\"\"\"<key>Sort Artist</key><string>Drew Goddard</string>\n <key>Sort Name</key><string>The Cabin In the Woods</string>\"\"\")):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create(\"/apple_music\")\n self.assertEqual([{'Artist': \"Drew Goddard\", 'Song': \"The Cabin In the Woods\"}],\n apple_music_data_parser.all_songs_and_artists)\n\n def test_save_several_songs_and_artists(self):\n with patch(\"builtins.open\", mock_open(read_data='''<key>Sort Name</key><string>The Cabin In the Woods</string>\n <key>Sort Artist</key><string>Drew Goddard</string>\n 
<key>Sort Name</key><string>Pulp Fiction</string>\n\t<key>Sort Artist</key><string>Quentin Tarantino</string>''')):\n apple_music_data_parser = music_compare.AppleMusicDataParser()\n apple_music_data_parser.create(\"/apple_music\")\n self.assertEqual([{'Artist': \"Drew Goddard\", 'Song': \"The Cabin In the Woods\"},\n {'Artist': \"Quentin Tarantino\", 'Song': \"Pulp Fiction\"}],\n apple_music_data_parser.all_songs_and_artists)\n\n\n\nclass spotify_data_parser(unittest.TestCase):\n\n def test_open_file_and_return_formated_data_split_by_coma(self):\n with patch(\"builtins.open\", mock_open(read_data=\"split,by,\")):\n result = music_compare.spotify_data_parser().read_file(\"/test_path\")\n open.assert_called_once_with(\"/test_path\", \"r\", newline='')\n self.assertTrue(result, \"_csv.DictReader\")\n\n def test_no_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {\n \"not found\": \"not important\",\n }\n result= music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)\n self.assertEqual(False,result)\n\n def test_artist_found_on_line(self):\n lines_csv_dict_reader_formated = {\n \"Artist Name\": \"Avenged Sevenfold\",\n }\n result= music_compare.spotify_data_parser().is_artist(lines_csv_dict_reader_formated)\n self.assertEqual(True,result)\n\n def test_song_not_found_on_line(self):\n lines_csv_dict_reader_formated = {\n \"not found\": \"Nightmare\",\n }\n result= music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)\n self.assertEqual(False,result)\n\n def test_song_found_on_line(self):\n lines_csv_dict_reader_formated = {\n \"Track Name\": \"Nightmare\",\n }\n result= music_compare.spotify_data_parser().is_song(lines_csv_dict_reader_formated)\n self.assertEqual(True,result)\n\n def test_dont_save_if_artist_not_found(self):\n lines_csv_dict_reader_formated = {\n \"not found\": \"not important\",\n }\n music_compare.spotify_data_parser().save_artist(lines_csv_dict_reader_formated)\n 
self.assertEqual({},music_compare.spotify_data_parser().one_song_and_artist)\n\n def test_save_if_artist_found(self):\n lines_csv_dict_reader_formated = {\n \"Artist Name\": \"test_artist\",\n }\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n self.assertEqual('test_artist', self.spotify_data_parser.one_song_and_artist.get('Artist'))\n\n\n def test_dont_save_if_song_not_found(self):\n lines_csv_dict_reader_formated = {\n \"not found\": \"not important\",\n }\n music_compare.spotify_data_parser().save_song(lines_csv_dict_reader_formated)\n self.assertEqual({},music_compare.spotify_data_parser().one_song_and_artist)\n\n def test_save_if_song_found(self):\n lines_csv_dict_reader_formated = {\n \"Track Name\": \"test_song\",\n }\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.assertEqual('test_song', self.spotify_data_parser .one_song_and_artist.get('Song'))\n\n def test_combine_song_found_and_NOT_artist(self):\n lines_csv_dict_reader_formated = {\n \"Name\": \"test_song\",\n \"Artist\": \"test_artist\"\n }\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([], self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_song_and_artist_if_found(self):\n lines_csv_dict_reader_formated = {\n \"Track Name\": \"test_song\",\n \"Artist Name\": \"test_artist\"\n }\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.save_song(lines_csv_dict_reader_formated)\n self.spotify_data_parser.save_artist(lines_csv_dict_reader_formated)\n\n self.spotify_data_parser.combine_song_and_artist()\n self.assertEqual([{'Artist': 'test_artist', 'Song': 'test_song'}],\n 
self.spotify_data_parser.all_songs_and_artists)\n\n def test_combine_several_songs_and_artists(self):\n with patch(\"builtins.open\", mock_open(read_data='''Spotify URI,Track Name,Artist Name,Album Name,Disc Number,Track Number,Track Duration (ms),Added By,Added At\n\"spotify:track:4UEo1b0wWrtHMC8bVqPiH8\",\"Nightmare\",\"Avenged Sevenfold\",\"Nightmare\",\"1\",\"1\",\"374453\",\"spotify:user:\",\"2010-10-17T20:18:40Z\"\n\"spotify:track:1d5UuboIPRMD4HaU3yycKC\",\"Somewhere I Belong\",\"Linkin Park\",\"Meteora (Bonus Edition)\",\"1\",\"3\",\"213933\",\"spotify:user:\",\"2010-10-17T20:24:25Z\"''')):\n self.spotify_data_parser = music_compare.spotify_data_parser()\n self.spotify_data_parser.create(\"/test_path\")\n self.assertEqual([{'Artist': 'Avenged Sevenfold', 'Song': 'Nightmare'},\n {'Artist': 'Linkin Park', 'Song': 'Somewhere I Belong'}],\n self.spotify_data_parser.all_songs_and_artists)\n\n\nclass apple_music_and_spotify_comparer(unittest.TestCase):\n\n def setUp(self):\n self.comparer = music_compare.apple_music_and_spotify_comparer()\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_save_data_from_spotify_and_apple_music_in_class(self, apple_music, spotify):\n test = music_compare.apple_music_and_spotify_comparer()\n spotify.return_value = [{'Artist': 'test_artist1', 'Song': 'test_song1'}]\n apple_music.return_value = [{'Artist': 'test_artist2', 'Song': 'test_song2'}]\n test.save_data_locally(\"/spotify\", \"/apple_music\")\n self.assertEqual([{'Artist': 'test_artist1', 'Song': 'test_song1'}], test.spotify_lib)\n self.assertEqual([{'Artist': 'test_artist2', 'Song': 'test_song2'}], test.apple_music_lib)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_apple_music(self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 
'Song': 'test_song'},\n {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'}]\n with patch(\"builtins.print\") as mock_print:\n self.comparer.find_matches(\"/spotify\", \"/apple_music\")\n mock_print.assert_has_calls(\n [call('following songs not found in apple_music:'),\n call('test_song_no_match by artist test_artist_no_match')])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_song_and_artist_when_song_not_found_in_spotify(self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'}]\n with patch(\"builtins.print\") as mock_print:\n self.comparer.find_matches(\"/spotify\", \"/apple_music\")\n mock_print.assert_has_calls([call('following songs not found in spotify:'),\n call('test_song by artist test_artist'),\n call()])\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_apple_music(self, apple_music, spotify):\n spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},\n {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},\n {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch(\"builtins.print\") as mock_print:\n self.comparer.find_matches(\"/spotify\", \"/apple_music\")\n self.assertEqual(3, mock_print.call_count)\n mock_print.assert_has_calls(\n [call('following songs not found in apple_music:'),\n 
call('test_song_no_match by artist test_artist_no_match'),\n call('test_song_no_match2 by artist test_artist_no_match2')],\n any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_song_not_found_in_spotify(self, apple_music, spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist_no_match', 'Song': 'test_song_no_match'},\n {'Artist': 'test_artist_no_match2', 'Song': 'test_song_no_match2'},\n {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist2', 'Song': 'test_song2'}]\n with patch(\"builtins.print\") as mock_print:\n self.comparer.find_matches(\"/spotify\", \"/apple_music\")\n self.assertEqual(4, mock_print.call_count)\n mock_print.assert_has_calls(\n [call('following songs not found in spotify:'),\n call('test_song_no_match by artist test_artist_no_match'),\n call('test_song_no_match2 by artist test_artist_no_match2'),\n call()],\n any_order=False)\n\n @patch.object(music_compare.spotify_data_parser, 'create')\n @patch.object(music_compare.AppleMusicDataParser, 'create')\n def test_print_several_songs_and_artists_when_some_songs_missing_in_spotify_and_in_apple_music(self, apple_music,\n spotify):\n apple_music.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist_only_apple_music', 'Song': 'test_song_only_apple_music'}]\n spotify.return_value = [{'Artist': 'test_artist', 'Song': 'test_song'},\n {'Artist': 'test_artist_only_spotify', 'Song': 'test_song_only_spotify'}]\n\n with patch(\"builtins.print\") as mock_print:\n self.comparer.find_matches(\"/spotify\", \"/apple_music\")\n self.assertEqual(5, mock_print.call_count)\n mock_print.assert_has_calls([call(\"following songs not found in spotify:\"),\n call('test_song_only_apple_music by artist 
test_artist_only_apple_music'),\n call(),\n call(\"following songs not found in apple_music:\"),\n call('test_song_only_spotify by artist test_artist_only_spotify')\n ])\n",
"step-ids": [
20,
23,
27,
28,
29
]
}
|
[
20,
23,
27,
28,
29
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 8 18:04:13 2018

@author: zhangchi
"""

class Solution(object):
    def transpose(self, A):
        """Return the transpose of matrix A.

        :type A: List[List[int]]  -- rectangular matrix (all rows the same length)
        :rtype: List[List[int]]   -- len(A[0]) rows of len(A) elements each;
                                     an empty matrix transposes to [] instead of
                                     raising IndexError as the original did
        """
        # zip(*A) yields the i-th element of every row, i.e. the i-th column;
        # wrap each tuple in list() to keep the declared List[List[int]] type.
        return [list(column) for column in zip(*A)]

s = Solution()
# print(...) with a single argument is valid in both Python 2 and Python 3,
# unlike the original "print expr" statement form which is Python-2-only.
print(s.transpose([[1, 2, 3], [4, 5, 6]]))
|
normal
|
{
"blob_id": "3882aaf94b19967a1d1eff23fa4862ea71de3b38",
"index": 7014,
"step-1": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 8 18:04:13 2018\n\n@author: zhangchi\n\"\"\"\n\nclass Solution(object):\n def transpose(self, A):\n \"\"\"\n :type A: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n row = len(A[0])\n result = [[] for _ in range(row)]\n for line in A:\n for index, item in enumerate(line):\n result[index].append(item)\n return result\n\ns = Solution()\nprint s.transpose([[1,2,3],[4,5,6]])",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import requests
from bs4 import BeautifulSoup
import codecs
# Page to scrape and the infobox table holding the university facts.
url = "https://en.wikipedia.org/wiki/Pennsylvania_State_University"
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
infoBox = soup.find("table", class_="infobox vcard")

# Collected facts, seeded with the university name.
webScrape = {"Univeristy": "The Pennsylvania State University"}
# Infobox row headers we want to capture (kept as a list so its public
# type is unchanged for any other consumer of this script).
wantedInfo = ["Motto", "Type", "Established", "Academic affiliations",
              "Endowment", "Budget", "President", "Provost",
              "Academic staff", "Students", "Undergraduates",
              "Postgraduates", "Location", "Campus", "Newspaper",
              "Colors", "Nickname", "Sporting affiliations", "Mascot", "Website"]
# Set built once for O(1) membership tests inside the loop, instead of an
# O(n) list scan per row.
wantedInfoSet = set(wantedInfo)

# Walk every infobox row; keep only rows with both a header (th) and data (td).
# findChildren is called once per tag per row (the original called it twice:
# once for the length check and again to take element [0]).
for tr in infoBox.find_all("tr"):
    headerCells = tr.findChildren("th", recursive=False)
    dataCells = tr.findChildren("td", recursive=False)
    if not headerCells or not dataCells:
        continue
    header = headerCells[0]
    data = dataCells[0]
    # Hoist get_text(): the original recomputed it up to four times per row.
    title = header.get_text()
    # Record each wanted header once (first occurrence wins).
    if title not in webScrape and title in wantedInfoSet:
        # Strip footnote markers; re-search after each decompose so nested
        # tags are handled safely (a one-shot find_all would differ here).
        while data("sup"):
            data.find("sup").decompose()
        # Strip styling spans, except on the Website row where the span
        # carries the URL text itself.
        while data("span") and title != "Website":
            data.find("span").decompose()
        webScrape[title] = data.get_text()

# Persist the scraped facts as UTF-8 "key: value" lines.
with codecs.open("webScrape.txt", "w", encoding="utf-8") as output_data:
    for key, value in webScrape.items():
        output_data.write("{}: {}\n".format(key, value))
|
normal
|
{
"blob_id": "f45ca4e75de7df542fbc65253bb9cc44a868522a",
"index": 6398,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor tr in infoBox.find_all('tr'):\n if len(tr.findChildren('th', recursive=False)) > 0 and len(tr.\n findChildren('td', recursive=False)) > 0:\n header = tr.findChildren('th', recursive=False)[0]\n data = tr.findChildren('td', recursive=False)[0]\n if header.get_text() not in webScrape and header.get_text(\n ) in wantedInfo:\n while data('sup'):\n data.find('sup').decompose()\n while data('span') and header.get_text() != 'Website':\n data.find('span').decompose()\n webScrape[header.get_text()] = data.get_text()\nwith codecs.open('webScrape.txt', 'w', encoding='utf-8') as output_data:\n for key in webScrape.keys():\n output_data.write('{}: {}\\n'.format(key, webScrape[key]))\n",
"step-3": "<mask token>\nurl = 'https://en.wikipedia.org/wiki/Pennsylvania_State_University'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.content, 'html.parser')\ninfoBox = soup.find('table', class_='infobox vcard')\nwebScrape = {'Univeristy': 'The Pennsylvania State University'}\nwantedInfo = ['Motto', 'Type', 'Established', 'Academic affiliations',\n 'Endowment', 'Budget', 'President', 'Provost', 'Academic staff',\n 'Students', 'Undergraduates', 'Postgraduates', 'Location', 'Campus',\n 'Newspaper', 'Colors', 'Nickname', 'Sporting affiliations', 'Mascot',\n 'Website']\nfor tr in infoBox.find_all('tr'):\n if len(tr.findChildren('th', recursive=False)) > 0 and len(tr.\n findChildren('td', recursive=False)) > 0:\n header = tr.findChildren('th', recursive=False)[0]\n data = tr.findChildren('td', recursive=False)[0]\n if header.get_text() not in webScrape and header.get_text(\n ) in wantedInfo:\n while data('sup'):\n data.find('sup').decompose()\n while data('span') and header.get_text() != 'Website':\n data.find('span').decompose()\n webScrape[header.get_text()] = data.get_text()\nwith codecs.open('webScrape.txt', 'w', encoding='utf-8') as output_data:\n for key in webScrape.keys():\n output_data.write('{}: {}\\n'.format(key, webScrape[key]))\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport codecs\nurl = 'https://en.wikipedia.org/wiki/Pennsylvania_State_University'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.content, 'html.parser')\ninfoBox = soup.find('table', class_='infobox vcard')\nwebScrape = {'Univeristy': 'The Pennsylvania State University'}\nwantedInfo = ['Motto', 'Type', 'Established', 'Academic affiliations',\n 'Endowment', 'Budget', 'President', 'Provost', 'Academic staff',\n 'Students', 'Undergraduates', 'Postgraduates', 'Location', 'Campus',\n 'Newspaper', 'Colors', 'Nickname', 'Sporting affiliations', 'Mascot',\n 'Website']\nfor tr in infoBox.find_all('tr'):\n if len(tr.findChildren('th', recursive=False)) > 0 and len(tr.\n findChildren('td', recursive=False)) > 0:\n header = tr.findChildren('th', recursive=False)[0]\n data = tr.findChildren('td', recursive=False)[0]\n if header.get_text() not in webScrape and header.get_text(\n ) in wantedInfo:\n while data('sup'):\n data.find('sup').decompose()\n while data('span') and header.get_text() != 'Website':\n data.find('span').decompose()\n webScrape[header.get_text()] = data.get_text()\nwith codecs.open('webScrape.txt', 'w', encoding='utf-8') as output_data:\n for key in webScrape.keys():\n output_data.write('{}: {}\\n'.format(key, webScrape[key]))\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport codecs\n\nurl = \"https://en.wikipedia.org/wiki/Pennsylvania_State_University\"\n\nresponse = requests.get(url)\n\nsoup = BeautifulSoup(response.content, 'html.parser')\ninfoBox = soup.find(\"table\", class_=\"infobox vcard\")\n\nwebScrape = {\"Univeristy\": \"The Pennsylvania State University\"}\nwantedInfo = [\"Motto\", \"Type\", \"Established\", \"Academic affiliations\",\n \"Endowment\", \"Budget\", \"President\", \"Provost\", \n \"Academic staff\", \"Students\", \"Undergraduates\", \n \"Postgraduates\", \"Location\", \"Campus\", \"Newspaper\", \n \"Colors\", \"Nickname\", \"Sporting affiliations\", \"Mascot\", \"Website\"]\n \n#Get all of the data inside info box\nfor tr in infoBox.find_all(\"tr\"):\n if len(tr.findChildren(\"th\", recursive=False)) > 0 and \\\n len(tr.findChildren(\"td\", recursive=False)) > 0:\n \n #Grab table header and table data\n header = tr.findChildren(\"th\", recursive=False)[0]\n data = tr.findChildren(\"td\", recursive=False)[0]\n\n #Add to dictionary if not in it already\n if header.get_text() not in webScrape and header.get_text() in wantedInfo:\n #Decompose unwanted tags\n while data(\"sup\"):\n data.find(\"sup\").decompose()\n while data(\"span\") and header.get_text() != \"Website\":\n data.find(\"span\").decompose()\n webScrape[header.get_text()] = data.get_text()\n \n#Writing to file\nwith codecs.open(\"webScrape.txt\", \"w\", encoding=\"utf-8\") as output_data:\n for key in webScrape.keys():\n output_data.write(\"{}: {}\\n\".format(key, webScrape[key]))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
student_objects = Student.objects.all()
context = {'students': student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
student_objects = Student.objects.all()
context = {'students': student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
def newstudent(request):
student_entered_name = request.GET.get('name')
Student.objects.create(name=student_entered_name)
print(student_entered_name)
context = {}
return render(request, 'student_list.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
from .models import Student
def index(request):
student_objects = Student.objects.all()
context = {'students': student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
def newstudent(request):
student_entered_name = request.GET.get('name')
Student.objects.create(name=student_entered_name)
print(student_entered_name)
context = {}
return render(request, 'student_list.html', context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.template import loader
# Create your views here.
from django.http import HttpResponse
from .models import Student
def index(request):
student_objects = Student.objects.all()
context = {"students": student_objects}
return render(request, 'student_list.html', context)
def addstudent(request):
context = {}
return render(request, 'add_student.html', context)
def newstudent(request):
student_entered_name = request.GET.get('name')
Student.objects.create(name=student_entered_name)
print(student_entered_name)
context = {}
return render(request, 'student_list.html', context)
|
flexible
|
{
"blob_id": "00e8e0b5aeccd2a67f6cfdad63012a0d8b066e6f",
"index": 9551,
"step-1": "<mask token>\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-4": "from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\nfrom .models import Student\n\n\ndef index(request):\n student_objects = Student.objects.all()\n context = {'students': student_objects}\n return render(request, 'student_list.html', context)\n\n\ndef addstudent(request):\n context = {}\n return render(request, 'add_student.html', context)\n\n\ndef newstudent(request):\n student_entered_name = request.GET.get('name')\n Student.objects.create(name=student_entered_name)\n print(student_entered_name)\n context = {}\n return render(request, 'student_list.html', context)\n",
"step-5": "from django.shortcuts import render\nfrom django.template import loader\n\n# Create your views here.\n\nfrom django.http import HttpResponse\n\nfrom .models import Student\n\ndef index(request):\n\tstudent_objects = Student.objects.all()\n\tcontext = {\"students\": student_objects}\n\treturn render(request, 'student_list.html', context)\n\ndef addstudent(request):\n\tcontext = {}\n\treturn render(request, 'add_student.html', context)\n\ndef newstudent(request):\n\tstudent_entered_name = request.GET.get('name')\n\tStudent.objects.create(name=student_entered_name)\n\tprint(student_entered_name)\n\tcontext = {}\n\treturn render(request, 'student_list.html', context)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@api_view(['POST'])
@authenticated
def fetchCurriculum(request):
university = request.DATA['user'].university.shortname
if university == 'Unknown':
ret = produceRetCode('fail', 'university not supported')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
eas_id = request.DATA['eas_id']
eas_pwd = request.DATA['eas_pwd']
except KeyError:
ret = produceRetCode('fail', 'eas id and eas pwd required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
semester = request.DATA['semester']
except KeyError:
ret = produceRetCode('fail', 'semester required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)
if fetched['status'] == 'success':
ret = _data_processor[university].process(fetched['raw-data'],
semester, request.DATA['user'])
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', fetched['message'])
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
def getCourseList(request):
courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(
section__start__lte=datetime.datetime.now()).filter(section__end__gte
=datetime.datetime.now())
serializer = CourseItemSerializer(courses, many=True)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_200_OK)
<|reserved_special_token_0|>
@api_view(['POST'])
@authenticated
@authreview
def alterReview(request):
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def deleteReview(request):
request.DATA['review'].delete()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['POST'])
@authenticated
def fetchCurriculum(request):
university = request.DATA['user'].university.shortname
if university == 'Unknown':
ret = produceRetCode('fail', 'university not supported')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
eas_id = request.DATA['eas_id']
eas_pwd = request.DATA['eas_pwd']
except KeyError:
ret = produceRetCode('fail', 'eas id and eas pwd required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
semester = request.DATA['semester']
except KeyError:
ret = produceRetCode('fail', 'semester required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)
if fetched['status'] == 'success':
ret = _data_processor[university].process(fetched['raw-data'],
semester, request.DATA['user'])
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', fetched['message'])
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
def getCourseList(request):
courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(
section__start__lte=datetime.datetime.now()).filter(section__end__gte
=datetime.datetime.now())
serializer = CourseItemSerializer(courses, many=True)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_200_OK)
def authreview(method):
def wrapper(request):
try:
rid = request.DATA['rid']
except KeyError:
ret = produceRetCode('fail', 'rid required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(id=rid)
except Review.DoesNotExist:
ret = produceRetCode('fail', 'review does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if review.user == request.DATA['user'].id:
request.DATA['review'] = review
else:
ret = produceRetCode('fail', 'permission denied')
return Response(ret, status=status.HTTP_202_ACCEPTED)
return method(request)
return wrapper
@api_view(['POST'])
@authenticated
def setReview(request):
request.DATA['user'] = request.DATA['user'].id
serializer = ReviewSerializer(data=request.DATA)
try:
is_course = request.DATA['is_course']
except KeyError:
ret = produceRetCode('fail', 'is_course flag required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if is_course:
try:
section = request.DATA['section']
except KeyError:
ret = produceRetCode('fail', 'section id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
section = Section.objects.get(id=section)
except Section.DoesNotExist:
ret = produceRetCode('fail', 'section does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'], section=
section.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount +
request.DATA['rate']) / (section.ratecount + 1)
section.ratecount = section.ratecount + 1
section.save()
except Exception:
ret = produceRetCode('fail', 'computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'add review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount - review.
rate + request.DATA['rate']) / section.ratecount
section.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'change review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
try:
professor = request.DATA['professor']
except KeyError:
ret = produceRetCode('fail', 'professor id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
professor = Professor.objects.get(id=professor)
except Professor.DoesNotExist:
ret = produceRetCode('fail', 'professor does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'],
professor=professor.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount +
request.DATA['rate']) / (professor.ratecount + 1)
professor.ratecount = professor.ratecount + 1
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount -
review.rate + request.DATA['rate']) / professor.ratecount
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
<|reserved_special_token_0|>
@api_view(['POST'])
@authenticated
@authreview
def alterReview(request):
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def deleteReview(request):
request.DATA['review'].delete()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['POST'])
@authenticated
def fetchCurriculum(request):
university = request.DATA['user'].university.shortname
if university == 'Unknown':
ret = produceRetCode('fail', 'university not supported')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
eas_id = request.DATA['eas_id']
eas_pwd = request.DATA['eas_pwd']
except KeyError:
ret = produceRetCode('fail', 'eas id and eas pwd required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
semester = request.DATA['semester']
except KeyError:
ret = produceRetCode('fail', 'semester required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)
if fetched['status'] == 'success':
ret = _data_processor[university].process(fetched['raw-data'],
semester, request.DATA['user'])
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', fetched['message'])
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
def getCourseList(request):
courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(
section__start__lte=datetime.datetime.now()).filter(section__end__gte
=datetime.datetime.now())
serializer = CourseItemSerializer(courses, many=True)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_200_OK)
def authreview(method):
def wrapper(request):
try:
rid = request.DATA['rid']
except KeyError:
ret = produceRetCode('fail', 'rid required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(id=rid)
except Review.DoesNotExist:
ret = produceRetCode('fail', 'review does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if review.user == request.DATA['user'].id:
request.DATA['review'] = review
else:
ret = produceRetCode('fail', 'permission denied')
return Response(ret, status=status.HTTP_202_ACCEPTED)
return method(request)
return wrapper
@api_view(['POST'])
@authenticated
def setReview(request):
request.DATA['user'] = request.DATA['user'].id
serializer = ReviewSerializer(data=request.DATA)
try:
is_course = request.DATA['is_course']
except KeyError:
ret = produceRetCode('fail', 'is_course flag required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if is_course:
try:
section = request.DATA['section']
except KeyError:
ret = produceRetCode('fail', 'section id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
section = Section.objects.get(id=section)
except Section.DoesNotExist:
ret = produceRetCode('fail', 'section does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'], section=
section.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount +
request.DATA['rate']) / (section.ratecount + 1)
section.ratecount = section.ratecount + 1
section.save()
except Exception:
ret = produceRetCode('fail', 'computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'add review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount - review.
rate + request.DATA['rate']) / section.ratecount
section.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'change review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
try:
professor = request.DATA['professor']
except KeyError:
ret = produceRetCode('fail', 'professor id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
professor = Professor.objects.get(id=professor)
except Professor.DoesNotExist:
ret = produceRetCode('fail', 'professor does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'],
professor=professor.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount +
request.DATA['rate']) / (professor.ratecount + 1)
professor.ratecount = professor.ratecount + 1
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount -
review.rate + request.DATA['rate']) / professor.ratecount
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def getReview(request):
serializer = ReviewSerializer(data)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def alterReview(request):
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def deleteReview(request):
request.DATA['review'].delete()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_data_processor = {}
<|reserved_special_token_0|>
_data_processor['UCB'] = _UCB
_data_processor['PU'] = _PU
@api_view(['POST'])
@authenticated
def fetchCurriculum(request):
university = request.DATA['user'].university.shortname
if university == 'Unknown':
ret = produceRetCode('fail', 'university not supported')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
eas_id = request.DATA['eas_id']
eas_pwd = request.DATA['eas_pwd']
except KeyError:
ret = produceRetCode('fail', 'eas id and eas pwd required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
semester = request.DATA['semester']
except KeyError:
ret = produceRetCode('fail', 'semester required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)
if fetched['status'] == 'success':
ret = _data_processor[university].process(fetched['raw-data'],
semester, request.DATA['user'])
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', fetched['message'])
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
def getCourseList(request):
courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(
section__start__lte=datetime.datetime.now()).filter(section__end__gte
=datetime.datetime.now())
serializer = CourseItemSerializer(courses, many=True)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_200_OK)
def authreview(method):
def wrapper(request):
try:
rid = request.DATA['rid']
except KeyError:
ret = produceRetCode('fail', 'rid required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(id=rid)
except Review.DoesNotExist:
ret = produceRetCode('fail', 'review does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if review.user == request.DATA['user'].id:
request.DATA['review'] = review
else:
ret = produceRetCode('fail', 'permission denied')
return Response(ret, status=status.HTTP_202_ACCEPTED)
return method(request)
return wrapper
@api_view(['POST'])
@authenticated
def setReview(request):
request.DATA['user'] = request.DATA['user'].id
serializer = ReviewSerializer(data=request.DATA)
try:
is_course = request.DATA['is_course']
except KeyError:
ret = produceRetCode('fail', 'is_course flag required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
if is_course:
try:
section = request.DATA['section']
except KeyError:
ret = produceRetCode('fail', 'section id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
section = Section.objects.get(id=section)
except Section.DoesNotExist:
ret = produceRetCode('fail', 'section does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'], section=
section.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount +
request.DATA['rate']) / (section.ratecount + 1)
section.ratecount = section.ratecount + 1
section.save()
except Exception:
ret = produceRetCode('fail', 'computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'add review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
section.rate = (section.rate * section.ratecount - review.
rate + request.DATA['rate']) / section.ratecount
section.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'change review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
try:
professor = request.DATA['professor']
except KeyError:
ret = produceRetCode('fail', 'professor id required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
professor = Professor.objects.get(id=professor)
except Professor.DoesNotExist:
ret = produceRetCode('fail', 'professor does not exist')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
review = Review.objects.get(user=request.DATA['user'],
professor=professor.id)
except Review.DoesNotExist:
serializer = ReviewSerializer(data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount +
request.DATA['rate']) / (professor.ratecount + 1)
professor.ratecount = professor.ratecount + 1
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
try:
professor.rate = (professor.rate * professor.ratecount -
review.rate + request.DATA['rate']) / professor.ratecount
professor.save()
except Exception:
ret = produceRetCode('fail', 'rate computing error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def getReview(request):
serializer = ReviewSerializer(data)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def alterReview(request):
serializer = ReviewSerializer(review, data=request.DATA)
if serializer.is_valid():
serializer.save()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', 'review data format error')
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def deleteReview(request):
request.DATA['review'].delete()
ret = produceRetCode('success')
return Response(ret, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
from backend.personal.models import User, UserState
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from backend.personal.views import produceRetCode, authenticated
from backend.utils.fetch.fetch import fetch_curriculum
from backend.univinfo.models import Professor, Section, Course
from backend.univinfo.serializers import CourseSerializer
from backend.curriculum.models import CourseItem, Review
from backend.curriculum.serializers import CourseItemSerializer, ReviewSerializer
import datetime
_data_processor = {}
from backend.utils.process import _UCB
from backend.utils.process import _PU
_data_processor['UCB'] = _UCB
_data_processor['PU'] = _PU
@api_view(['POST'])
@authenticated
def fetchCurriculum(request):
university = request.DATA['user'].university.shortname
if university == 'Unknown':
ret = produceRetCode('fail', 'university not supported')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
eas_id = request.DATA['eas_id']
eas_pwd = request.DATA['eas_pwd']
except KeyError:
ret = produceRetCode('fail', 'eas id and eas pwd required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
try:
semester = request.DATA['semester']
except KeyError:
ret = produceRetCode('fail', 'semester required')
return Response(ret, status=status.HTTP_202_ACCEPTED)
fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)
#import pickle
#with open('data.pickle', 'rb') as f:
# fetched = pickle.load(f)
if fetched['status'] == 'success':
ret = _data_processor[university].process(fetched['raw-data'], semester, request.DATA['user'])
return Response(ret, status=status.HTTP_200_OK)
else:
ret = produceRetCode('fail', fetched['message'])
return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
def getCourseList(request):
courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(section__start__lte=datetime.datetime.now()).filter(section__end__gte=datetime.datetime.now())
serializer = CourseItemSerializer(courses, many=True)
ret = produceRetCode('success', '', serializer.data)
return Response(ret, status=status.HTTP_200_OK)
def authreview(method):
    """Decorator: resolve ``request.DATA['rid']`` to a Review owned by the caller.

    On success the Review instance is stored in ``request.DATA['review']``
    and the wrapped view is invoked; any failure (missing rid, unknown
    review, foreign owner) short-circuits with a 202 fail response.
    """
    import functools

    @functools.wraps(method)  # preserve the wrapped view's name/docstring
    def wrapper(request):
        try:
            rid = request.DATA['rid']
        except KeyError:
            ret = produceRetCode('fail', 'rid required')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        try:
            review = Review.objects.get(id=rid)
        except Review.DoesNotExist:
            ret = produceRetCode('fail', 'review does not exist')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        # NOTE(review): review.user is compared against the user's numeric id;
        # confirm the Review model stores the raw id here -- if it were a FK
        # instance this comparison would never match.
        if review.user == request.DATA['user'].id:
            request.DATA['review'] = review
        else:
            ret = produceRetCode('fail', 'permission denied')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        return method(request)
    return wrapper
@api_view(['POST'])
@authenticated
def setReview(request):
    """Create or update the caller's review of a section or a professor.

    Expects an ``is_course`` flag plus a ``section`` or ``professor`` id and
    the review fields (including ``rate``). Keeps the target's running
    average ``rate`` / ``ratecount`` in sync with the submitted rating.

    Fixes over the original:
    - After successfully creating a new review, the original fell through to
      the update path and crashed on an unbound ``review`` name; we now
      return success immediately.
    - Neither path ever returned a success Response (the view returned
      None); both now return 200 on success.
    - The update path read ``review.rate`` *after* serializer.save() had
      already overwritten the instance, corrupting the running average; the
      old rate is now captured before saving.
    - Dropped an unused serializer built before the flag check.
    """
    request.DATA['user'] = request.DATA['user'].id
    try:
        is_course = request.DATA['is_course']
    except KeyError:
        ret = produceRetCode('fail', 'is_course flag required')
        return Response(ret, status=status.HTTP_202_ACCEPTED)
    if is_course:
        try:
            section = request.DATA['section']
        except KeyError:
            ret = produceRetCode('fail', 'section id required')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        try:
            section = Section.objects.get(id=section)
        except Section.DoesNotExist:
            ret = produceRetCode('fail', 'section does not exist')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        try:
            review = Review.objects.get(user=request.DATA['user'], section=section.id)
        except Review.DoesNotExist:
            # No existing review: create one and fold its rate into the average.
            serializer = ReviewSerializer(data=request.DATA)
            if serializer.is_valid():
                serializer.save()
                try:
                    section.rate = (section.rate * section.ratecount + request.DATA['rate']) / (section.ratecount + 1)
                    section.ratecount = section.ratecount + 1
                    section.save()
                except Exception:
                    # NOTE(review): the review was already saved at this point;
                    # the rate update is best-effort, as in the original.
                    ret = produceRetCode('fail', 'computing error')
                    return Response(ret, status=status.HTTP_202_ACCEPTED)
                ret = produceRetCode('success')
                return Response(ret, status=status.HTTP_200_OK)
            else:
                ret = produceRetCode('fail', 'add review data format error')
                return Response(ret, status=status.HTTP_202_ACCEPTED)
        # Existing review: capture the old rate before save() mutates it.
        old_rate = review.rate
        serializer = ReviewSerializer(review, data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            try:
                section.rate = (section.rate * section.ratecount - old_rate + request.DATA['rate']) / section.ratecount
                section.save()
            except Exception:
                ret = produceRetCode('fail', 'rate computing error')
                return Response(ret, status=status.HTTP_202_ACCEPTED)
            ret = produceRetCode('success')
            return Response(ret, status=status.HTTP_200_OK)
        else:
            ret = produceRetCode('fail', 'change review data format error')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
    else:
        try:
            professor = request.DATA['professor']
        except KeyError:
            ret = produceRetCode('fail', 'professor id required')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        try:
            professor = Professor.objects.get(id=professor)
        except Professor.DoesNotExist:
            ret = produceRetCode('fail', 'professor does not exist')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
        try:
            review = Review.objects.get(user=request.DATA['user'], professor=professor.id)
        except Review.DoesNotExist:
            # No existing review: create one and fold its rate into the average.
            serializer = ReviewSerializer(data=request.DATA)
            if serializer.is_valid():
                serializer.save()
                try:
                    professor.rate = (professor.rate * professor.ratecount + request.DATA['rate']) / (professor.ratecount + 1)
                    professor.ratecount = professor.ratecount + 1
                    professor.save()
                except Exception:
                    ret = produceRetCode('fail', 'rate computing error')
                    return Response(ret, status=status.HTTP_202_ACCEPTED)
                ret = produceRetCode('success')
                return Response(ret, status=status.HTTP_200_OK)
            else:
                ret = produceRetCode('fail', 'review data format error')
                return Response(ret, status=status.HTTP_202_ACCEPTED)
        # Existing review: capture the old rate before save() mutates it.
        old_rate = review.rate
        serializer = ReviewSerializer(review, data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            try:
                professor.rate = (professor.rate * professor.ratecount - old_rate + request.DATA['rate']) / professor.ratecount
                professor.save()
            except Exception:
                ret = produceRetCode('fail', 'rate computing error')
                return Response(ret, status=status.HTTP_202_ACCEPTED)
            ret = produceRetCode('success')
            return Response(ret, status=status.HTTP_200_OK)
        else:
            ret = produceRetCode('fail', 'review data format error')
            return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def getReview(request):
    """Return the review resolved by @authreview (request.DATA['review'])."""
    # Fixed: the original serialized the undefined name 'data' (NameError);
    # @authreview stores the resolved Review instance in request.DATA['review'].
    serializer = ReviewSerializer(request.DATA['review'])
    ret = produceRetCode('success', '', serializer.data)
    # NOTE(review): success is reported with 202 here while sibling views
    # use 200 -- kept as-is in case clients depend on it; confirm and unify.
    return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def alterReview(request):
    """Update the review resolved by @authreview with the posted fields.

    Responds 200 on success, 202 with a fail code when the submitted
    review data does not validate.
    """
    # Fixed: the original referenced the undefined name 'review' (NameError);
    # @authreview stores the resolved Review instance in request.DATA['review'].
    serializer = ReviewSerializer(request.DATA['review'], data=request.DATA)
    if serializer.is_valid():
        serializer.save()
        ret = produceRetCode('success')
        return Response(ret, status=status.HTTP_200_OK)
    else:
        ret = produceRetCode('fail', 'review data format error')
        return Response(ret, status=status.HTTP_202_ACCEPTED)
@api_view(['POST'])
@authenticated
@authreview
def deleteReview(request):
    """Delete the review resolved by @authreview and report success."""
    review = request.DATA['review']
    review.delete()
    return Response(produceRetCode('success'), status=status.HTTP_200_OK)
|
flexible
|
{
"blob_id": "a33ddb999f7bb50688b33946046ba460cbbbd172",
"index": 9181,
"step-1": "<mask token>\n\n\n@api_view(['POST'])\n@authenticated\ndef fetchCurriculum(request):\n university = request.DATA['user'].university.shortname\n if university == 'Unknown':\n ret = produceRetCode('fail', 'university not supported')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n eas_id = request.DATA['eas_id']\n eas_pwd = request.DATA['eas_pwd']\n except KeyError:\n ret = produceRetCode('fail', 'eas id and eas pwd required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n semester = request.DATA['semester']\n except KeyError:\n ret = produceRetCode('fail', 'semester required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)\n if fetched['status'] == 'success':\n ret = _data_processor[university].process(fetched['raw-data'],\n semester, request.DATA['user'])\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', fetched['message'])\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\ndef getCourseList(request):\n courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(\n section__start__lte=datetime.datetime.now()).filter(section__end__gte\n =datetime.datetime.now())\n serializer = CourseItemSerializer(courses, many=True)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_200_OK)\n\n\n<mask token>\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef alterReview(request):\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef deleteReview(request):\n request.DATA['review'].delete()\n ret = 
produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n",
"step-2": "<mask token>\n\n\n@api_view(['POST'])\n@authenticated\ndef fetchCurriculum(request):\n university = request.DATA['user'].university.shortname\n if university == 'Unknown':\n ret = produceRetCode('fail', 'university not supported')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n eas_id = request.DATA['eas_id']\n eas_pwd = request.DATA['eas_pwd']\n except KeyError:\n ret = produceRetCode('fail', 'eas id and eas pwd required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n semester = request.DATA['semester']\n except KeyError:\n ret = produceRetCode('fail', 'semester required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)\n if fetched['status'] == 'success':\n ret = _data_processor[university].process(fetched['raw-data'],\n semester, request.DATA['user'])\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', fetched['message'])\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\ndef getCourseList(request):\n courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(\n section__start__lte=datetime.datetime.now()).filter(section__end__gte\n =datetime.datetime.now())\n serializer = CourseItemSerializer(courses, many=True)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_200_OK)\n\n\ndef authreview(method):\n\n def wrapper(request):\n try:\n rid = request.DATA['rid']\n except KeyError:\n ret = produceRetCode('fail', 'rid required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(id=rid)\n except Review.DoesNotExist:\n ret = produceRetCode('fail', 'review does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if review.user == request.DATA['user'].id:\n request.DATA['review'] = review\n else:\n ret = produceRetCode('fail', 'permission denied')\n 
return Response(ret, status=status.HTTP_202_ACCEPTED)\n return method(request)\n return wrapper\n\n\n@api_view(['POST'])\n@authenticated\ndef setReview(request):\n request.DATA['user'] = request.DATA['user'].id\n serializer = ReviewSerializer(data=request.DATA)\n try:\n is_course = request.DATA['is_course']\n except KeyError:\n ret = produceRetCode('fail', 'is_course flag required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if is_course:\n try:\n section = request.DATA['section']\n except KeyError:\n ret = produceRetCode('fail', 'section id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n section = Section.objects.get(id=section)\n except Section.DoesNotExist:\n ret = produceRetCode('fail', 'section does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'], section=\n section.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount +\n request.DATA['rate']) / (section.ratecount + 1)\n section.ratecount = section.ratecount + 1\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'add review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount - review.\n rate + request.DATA['rate']) / section.ratecount\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'change review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n try:\n professor = 
request.DATA['professor']\n except KeyError:\n ret = produceRetCode('fail', 'professor id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n professor = Professor.objects.get(id=professor)\n except Professor.DoesNotExist:\n ret = produceRetCode('fail', 'professor does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'],\n professor=professor.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount +\n request.DATA['rate']) / (professor.ratecount + 1)\n professor.ratecount = professor.ratecount + 1\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount -\n review.rate + request.DATA['rate']) / professor.ratecount\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n<mask token>\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef alterReview(request):\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef 
deleteReview(request):\n request.DATA['review'].delete()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n",
"step-3": "<mask token>\n\n\n@api_view(['POST'])\n@authenticated\ndef fetchCurriculum(request):\n university = request.DATA['user'].university.shortname\n if university == 'Unknown':\n ret = produceRetCode('fail', 'university not supported')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n eas_id = request.DATA['eas_id']\n eas_pwd = request.DATA['eas_pwd']\n except KeyError:\n ret = produceRetCode('fail', 'eas id and eas pwd required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n semester = request.DATA['semester']\n except KeyError:\n ret = produceRetCode('fail', 'semester required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)\n if fetched['status'] == 'success':\n ret = _data_processor[university].process(fetched['raw-data'],\n semester, request.DATA['user'])\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', fetched['message'])\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\ndef getCourseList(request):\n courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(\n section__start__lte=datetime.datetime.now()).filter(section__end__gte\n =datetime.datetime.now())\n serializer = CourseItemSerializer(courses, many=True)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_200_OK)\n\n\ndef authreview(method):\n\n def wrapper(request):\n try:\n rid = request.DATA['rid']\n except KeyError:\n ret = produceRetCode('fail', 'rid required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(id=rid)\n except Review.DoesNotExist:\n ret = produceRetCode('fail', 'review does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if review.user == request.DATA['user'].id:\n request.DATA['review'] = review\n else:\n ret = produceRetCode('fail', 'permission denied')\n 
return Response(ret, status=status.HTTP_202_ACCEPTED)\n return method(request)\n return wrapper\n\n\n@api_view(['POST'])\n@authenticated\ndef setReview(request):\n request.DATA['user'] = request.DATA['user'].id\n serializer = ReviewSerializer(data=request.DATA)\n try:\n is_course = request.DATA['is_course']\n except KeyError:\n ret = produceRetCode('fail', 'is_course flag required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if is_course:\n try:\n section = request.DATA['section']\n except KeyError:\n ret = produceRetCode('fail', 'section id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n section = Section.objects.get(id=section)\n except Section.DoesNotExist:\n ret = produceRetCode('fail', 'section does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'], section=\n section.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount +\n request.DATA['rate']) / (section.ratecount + 1)\n section.ratecount = section.ratecount + 1\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'add review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount - review.\n rate + request.DATA['rate']) / section.ratecount\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'change review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n try:\n professor = 
request.DATA['professor']\n except KeyError:\n ret = produceRetCode('fail', 'professor id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n professor = Professor.objects.get(id=professor)\n except Professor.DoesNotExist:\n ret = produceRetCode('fail', 'professor does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'],\n professor=professor.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount +\n request.DATA['rate']) / (professor.ratecount + 1)\n professor.ratecount = professor.ratecount + 1\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount -\n review.rate + request.DATA['rate']) / professor.ratecount\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef getReview(request):\n serializer = ReviewSerializer(data)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef alterReview(request):\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n ret = produceRetCode('success')\n return Response(ret, 
status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef deleteReview(request):\n request.DATA['review'].delete()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n",
"step-4": "<mask token>\n_data_processor = {}\n<mask token>\n_data_processor['UCB'] = _UCB\n_data_processor['PU'] = _PU\n\n\n@api_view(['POST'])\n@authenticated\ndef fetchCurriculum(request):\n university = request.DATA['user'].university.shortname\n if university == 'Unknown':\n ret = produceRetCode('fail', 'university not supported')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n eas_id = request.DATA['eas_id']\n eas_pwd = request.DATA['eas_pwd']\n except KeyError:\n ret = produceRetCode('fail', 'eas id and eas pwd required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n semester = request.DATA['semester']\n except KeyError:\n ret = produceRetCode('fail', 'semester required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n fetched = fetch_curriculum(university, eas_id, eas_pwd, semester)\n if fetched['status'] == 'success':\n ret = _data_processor[university].process(fetched['raw-data'],\n semester, request.DATA['user'])\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', fetched['message'])\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\ndef getCourseList(request):\n courses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(\n section__start__lte=datetime.datetime.now()).filter(section__end__gte\n =datetime.datetime.now())\n serializer = CourseItemSerializer(courses, many=True)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_200_OK)\n\n\ndef authreview(method):\n\n def wrapper(request):\n try:\n rid = request.DATA['rid']\n except KeyError:\n ret = produceRetCode('fail', 'rid required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(id=rid)\n except Review.DoesNotExist:\n ret = produceRetCode('fail', 'review does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if review.user == 
request.DATA['user'].id:\n request.DATA['review'] = review\n else:\n ret = produceRetCode('fail', 'permission denied')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n return method(request)\n return wrapper\n\n\n@api_view(['POST'])\n@authenticated\ndef setReview(request):\n request.DATA['user'] = request.DATA['user'].id\n serializer = ReviewSerializer(data=request.DATA)\n try:\n is_course = request.DATA['is_course']\n except KeyError:\n ret = produceRetCode('fail', 'is_course flag required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n if is_course:\n try:\n section = request.DATA['section']\n except KeyError:\n ret = produceRetCode('fail', 'section id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n section = Section.objects.get(id=section)\n except Section.DoesNotExist:\n ret = produceRetCode('fail', 'section does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'], section=\n section.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount +\n request.DATA['rate']) / (section.ratecount + 1)\n section.ratecount = section.ratecount + 1\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'add review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n section.rate = (section.rate * section.ratecount - review.\n rate + request.DATA['rate']) / section.ratecount\n section.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 
'change review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n try:\n professor = request.DATA['professor']\n except KeyError:\n ret = produceRetCode('fail', 'professor id required')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n professor = Professor.objects.get(id=professor)\n except Professor.DoesNotExist:\n ret = produceRetCode('fail', 'professor does not exist')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n try:\n review = Review.objects.get(user=request.DATA['user'],\n professor=professor.id)\n except Review.DoesNotExist:\n serializer = ReviewSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount +\n request.DATA['rate']) / (professor.ratecount + 1)\n professor.ratecount = professor.ratecount + 1\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n serializer = ReviewSerializer(review, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n try:\n professor.rate = (professor.rate * professor.ratecount -\n review.rate + request.DATA['rate']) / professor.ratecount\n professor.save()\n except Exception:\n ret = produceRetCode('fail', 'rate computing error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef getReview(request):\n serializer = ReviewSerializer(data)\n ret = produceRetCode('success', '', serializer.data)\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef alterReview(request):\n serializer = ReviewSerializer(review, 
data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n else:\n ret = produceRetCode('fail', 'review data format error')\n return Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef deleteReview(request):\n request.DATA['review'].delete()\n ret = produceRetCode('success')\n return Response(ret, status=status.HTTP_200_OK)\n",
"step-5": "from backend.personal.models import User, UserState\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom backend.personal.views import produceRetCode, authenticated\nfrom backend.utils.fetch.fetch import fetch_curriculum\nfrom backend.univinfo.models import Professor, Section, Course\nfrom backend.univinfo.serializers import CourseSerializer\nfrom backend.curriculum.models import CourseItem, Review\nfrom backend.curriculum.serializers import CourseItemSerializer, ReviewSerializer\nimport datetime\n\n_data_processor = {}\nfrom backend.utils.process import _UCB\nfrom backend.utils.process import _PU\n_data_processor['UCB'] = _UCB\n_data_processor['PU'] = _PU\n\n\n@api_view(['POST'])\n@authenticated\ndef fetchCurriculum(request):\n\tuniversity = request.DATA['user'].university.shortname\n\tif university == 'Unknown':\n\t\tret = produceRetCode('fail', 'university not supported')\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\ttry:\n\t\teas_id = request.DATA['eas_id']\n\t\teas_pwd = request.DATA['eas_pwd']\n\texcept KeyError:\n\t\tret = produceRetCode('fail', 'eas id and eas pwd required')\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\ttry:\n\t\tsemester = request.DATA['semester']\n\texcept KeyError:\n\t\tret = produceRetCode('fail', 'semester required')\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\tfetched = fetch_curriculum(university, eas_id, eas_pwd, semester)\n\t#import pickle\n\t#with open('data.pickle', 'rb') as f:\n\t#\tfetched = pickle.load(f)\n\tif fetched['status'] == 'success':\n\t\tret = _data_processor[university].process(fetched['raw-data'], semester, request.DATA['user'])\n\t\treturn Response(ret, status=status.HTTP_200_OK)\n\telse:\n\t\tret = produceRetCode('fail', fetched['message'])\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\n@api_view(['POST'])\n@authenticated\ndef 
getCourseList(request):\n\tcourses = CourseItem.objects.filter(user=request.DATA['user'].id).filter(section__start__lte=datetime.datetime.now()).filter(section__end__gte=datetime.datetime.now())\n\tserializer = CourseItemSerializer(courses, many=True)\n\tret = produceRetCode('success', '', serializer.data)\n\treturn Response(ret, status=status.HTTP_200_OK)\n\ndef authreview(method):\n\tdef wrapper(request):\n\t\ttry:\n\t\t\trid = request.DATA['rid']\n\t\texcept KeyError:\n\t\t\tret = produceRetCode('fail', 'rid required')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\ttry:\n\t\t\treview = Review.objects.get(id=rid)\n\t\texcept Review.DoesNotExist:\n\t\t\tret = produceRetCode('fail', 'review does not exist')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\tif review.user == request.DATA['user'].id:\n\t\t\trequest.DATA['review'] = review\n\t\telse:\n\t\t\tret = produceRetCode('fail', 'permission denied')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\treturn method(request)\n\treturn wrapper\n\n@api_view(['POST'])\n@authenticated\ndef setReview(request):\n\trequest.DATA['user'] = request.DATA['user'].id\n\tserializer = ReviewSerializer(data=request.DATA)\n\ttry:\n\t\tis_course = request.DATA['is_course']\n\texcept KeyError:\n\t\tret = produceRetCode('fail', 'is_course flag required')\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\tif is_course:\n\t\ttry:\n\t\t\tsection = request.DATA['section']\n\t\texcept KeyError:\n\t\t\tret = produceRetCode('fail', 'section id required')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\ttry:\n\t\t\tsection = Section.objects.get(id=section)\n\t\texcept Section.DoesNotExist:\n\t\t\tret = produceRetCode('fail', 'section does not exist')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\ttry:\n\t\t\treview = Review.objects.get(user=request.DATA['user'], section=section.id)\n\t\texcept Review.DoesNotExist:\n\t\t\tserializer = 
ReviewSerializer(data=request.DATA)\n\t\t\tif serializer.is_valid():\n\t\t\t\tserializer.save()\n\t\t\t\ttry:\n\t\t\t\t\tsection.rate = (section.rate * section.ratecount + request.DATA['rate']) / (section.ratecount + 1)\n\t\t\t\t\tsection.ratecount = section.ratecount + 1\n\t\t\t\t\tsection.save()\n\t\t\t\texcept Exception:\n\t\t\t\t\tret = produceRetCode('fail', 'computing error')\n\t\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\t\telse:\n\t\t\t\tret = produceRetCode('fail', 'add review data format error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\tserializer = ReviewSerializer(review, data=request.DATA)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\ttry:\n\t\t\t\tsection.rate = (section.rate * section.ratecount - review.rate + request.DATA['rate']) / section.ratecount\n\t\t\t\tsection.save()\n\t\t\texcept Exception:\n\t\t\t\tret = produceRetCode('fail', 'rate computing error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\telse:\n\t\t\t\tret = produceRetCode('fail', 'change review data format error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\telse:\n\t\ttry:\n\t\t\tprofessor = request.DATA['professor']\n\t\texcept KeyError:\n\t\t\tret = produceRetCode('fail', 'professor id required')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\ttry:\n\t\t\tprofessor = Professor.objects.get(id=professor)\n\t\texcept Professor.DoesNotExist:\n\t\t\tret = produceRetCode('fail', 'professor does not exist')\n\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\ttry:\n\t\t\treview = Review.objects.get(user=request.DATA['user'], professor=professor.id)\n\t\texcept Review.DoesNotExist:\n\t\t\tserializer = ReviewSerializer(data=request.DATA)\n\t\t\tif serializer.is_valid():\n\t\t\t\tserializer.save()\n\t\t\t\ttry:\n\t\t\t\t\tprofessor.rate = (professor.rate * professor.ratecount + request.DATA['rate']) / (professor.ratecount + 
1)\n\t\t\t\t\tprofessor.ratecount = professor.ratecount + 1\n\t\t\t\t\tprofessor.save()\n\t\t\t\texcept Exception:\n\t\t\t\t\tret = produceRetCode('fail', 'rate computing error')\n\t\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\t\telse:\n\t\t\t\tret = produceRetCode('fail', 'review data format error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\tserializer = ReviewSerializer(review, data=request.DATA)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\ttry:\n\t\t\t\tprofessor.rate = (professor.rate * professor.ratecount - review.rate + request.DATA['rate']) / professor.ratecount\n\t\t\t\tprofessor.save()\n\t\t\texcept Exception:\n\t\t\t\tret = produceRetCode('fail', 'rate computing error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\t\telse:\n\t\t\t\tret = produceRetCode('fail', 'review data format error')\n\t\t\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef getReview(request):\n\tserializer = ReviewSerializer(data)\n\tret = produceRetCode('success', '', serializer.data)\n\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef alterReview(request):\n\tserializer = ReviewSerializer(review, data=request.DATA)\n\tif serializer.is_valid():\n\t\tserializer.save()\n\t\tret = produceRetCode('success')\n\t\treturn Response(ret, status=status.HTTP_200_OK)\n\telse:\n\t\tret = produceRetCode('fail', 'review data format error')\n\t\treturn Response(ret, status=status.HTTP_202_ACCEPTED)\n\n@api_view(['POST'])\n@authenticated\n@authreview\ndef deleteReview(request):\n\trequest.DATA['review'].delete()\n\tret = produceRetCode('success')\n\treturn Response(ret, status=status.HTTP_200_OK)\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',
'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (
100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,
314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,
56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2,
11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)
def test_window_indices_function(num_of_elements, middle_idx, window_size,
expected_indices):
min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,
window_size)
assert (min_idx, max_idx) == expected_indices
test_list = list(range(num_of_elements))
assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)
def test_mono_temporal_cloud_detection(test_eopatch):
add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),
all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),
mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,
average_over=4, dilation_size=2, mono_threshold=0.4)
eop_clm = add_tcm(test_eopatch)
assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])
assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',
'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (
100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,
314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,
56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2,
11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)
def test_window_indices_function(num_of_elements, middle_idx, window_size,
expected_indices):
min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,
window_size)
assert (min_idx, max_idx) == expected_indices
test_list = list(range(num_of_elements))
assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)
def test_mono_temporal_cloud_detection(test_eopatch):
add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),
all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),
mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,
average_over=4, dilation_size=2, mono_threshold=0.4)
eop_clm = add_tcm(test_eopatch)
assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])
assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])
def test_multi_temporal_cloud_detection_downscaled(test_eopatch):
add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),
processing_resolution=120, mono_features=('CLP_TEST', 'CLM_TEST'),
multi_features=('CLP_MULTI_TEST', 'CLM_MULTI_TEST'), mask_feature=(
FeatureType.MASK, 'CLM_INTERSSIM_TEST'), average_over=8,
dilation_size=4)
eop_clm = add_tcm(test_eopatch)
for feature in ((FeatureType.MASK, 'CLM_TEST'), (FeatureType.DATA,
'CLP_TEST')):
assert eop_clm[feature].ndim == 4
assert eop_clm[feature].shape[:-1] == eop_clm.data['BANDS-S2-L1C'
].shape[:-1]
assert eop_clm[feature].shape[-1] == 1
assert eop_clm.mask['CLM_TEST'].dtype == bool
assert eop_clm.data['CLP_TEST'].dtype == np.float32
assert np.mean(eop_clm.mask['CLM_TEST']) == pytest.approx(np.mean(
eop_clm.mask['CLM_S2C']), abs=0.01)
assert np.mean(eop_clm.data['CLP_TEST']) == pytest.approx(np.mean(
eop_clm.data['CLP_S2C']), abs=0.01)
cloudless = np.mean(eop_clm.mask['CLM_TEST'], axis=(1, 2, 3)) == 0
assert np.mean(cloudless == eop_clm.label['IS_CLOUDLESS'][:, 0]) > 0.94
assert_array_equal(eop_clm.data['CLP_MULTI_TEST'], test_eopatch.data[
'CLP_MULTI'])
assert_array_equal(eop_clm.mask['CLM_MULTI_TEST'], test_eopatch.mask[
'CLM_MULTI'])
assert_array_equal(eop_clm.mask['CLM_INTERSSIM_TEST'], test_eopatch.
mask['CLM_INTERSSIM'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from eolearn.core import FeatureType
from eolearn.mask import CloudMaskTask
from eolearn.mask.cloud_mask import _get_window_indices
@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',
'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (
100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,
314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,
56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2,
11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)
def test_window_indices_function(num_of_elements, middle_idx, window_size,
expected_indices):
min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,
window_size)
assert (min_idx, max_idx) == expected_indices
test_list = list(range(num_of_elements))
assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)
def test_mono_temporal_cloud_detection(test_eopatch):
add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),
all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),
mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,
average_over=4, dilation_size=2, mono_threshold=0.4)
eop_clm = add_tcm(test_eopatch)
assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])
assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])
def test_multi_temporal_cloud_detection_downscaled(test_eopatch):
add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),
processing_resolution=120, mono_features=('CLP_TEST', 'CLM_TEST'),
multi_features=('CLP_MULTI_TEST', 'CLM_MULTI_TEST'), mask_feature=(
FeatureType.MASK, 'CLM_INTERSSIM_TEST'), average_over=8,
dilation_size=4)
eop_clm = add_tcm(test_eopatch)
for feature in ((FeatureType.MASK, 'CLM_TEST'), (FeatureType.DATA,
'CLP_TEST')):
assert eop_clm[feature].ndim == 4
assert eop_clm[feature].shape[:-1] == eop_clm.data['BANDS-S2-L1C'
].shape[:-1]
assert eop_clm[feature].shape[-1] == 1
assert eop_clm.mask['CLM_TEST'].dtype == bool
assert eop_clm.data['CLP_TEST'].dtype == np.float32
assert np.mean(eop_clm.mask['CLM_TEST']) == pytest.approx(np.mean(
eop_clm.mask['CLM_S2C']), abs=0.01)
assert np.mean(eop_clm.data['CLP_TEST']) == pytest.approx(np.mean(
eop_clm.data['CLP_S2C']), abs=0.01)
cloudless = np.mean(eop_clm.mask['CLM_TEST'], axis=(1, 2, 3)) == 0
assert np.mean(cloudless == eop_clm.label['IS_CLOUDLESS'][:, 0]) > 0.94
assert_array_equal(eop_clm.data['CLP_MULTI_TEST'], test_eopatch.data[
'CLP_MULTI'])
assert_array_equal(eop_clm.mask['CLM_MULTI_TEST'], test_eopatch.mask[
'CLM_MULTI'])
assert_array_equal(eop_clm.mask['CLM_INTERSSIM_TEST'], test_eopatch.
mask['CLM_INTERSSIM'])
<|reserved_special_token_1|>
"""
Copyright (c) 2017- Sinergise and contributors
For the full list of contributors, see the CREDITS file in the root directory of this source tree.
This source code is licensed under the MIT license, see the LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from eolearn.core import FeatureType
from eolearn.mask import CloudMaskTask
from eolearn.mask.cloud_mask import _get_window_indices
@pytest.mark.parametrize(
("num_of_elements", "middle_idx", "window_size", "expected_indices"),
[
(100, 0, 10, (0, 10)),
(100, 1, 10, (0, 10)),
(100, 50, 10, (45, 55)),
(271, 270, 10, (261, 271)),
(314, 314, 10, (304, 314)),
(100, 0, 11, (0, 11)),
(100, 1, 11, (0, 11)),
(100, 50, 11, (45, 56)),
(271, 270, 11, (260, 271)),
(314, 314, 11, (303, 314)),
(11, 2, 11, (0, 11)),
(11, 2, 33, (0, 11)),
],
ids=str,
)
def test_window_indices_function(num_of_elements, middle_idx, window_size, expected_indices):
min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx, window_size)
assert (min_idx, max_idx) == expected_indices
test_list = list(range(num_of_elements))
assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)
def test_mono_temporal_cloud_detection(test_eopatch):
add_tcm = CloudMaskTask(
data_feature=(FeatureType.DATA, "BANDS-S2-L1C"),
all_bands=True,
is_data_feature=(FeatureType.MASK, "IS_DATA"),
mono_features=("CLP_TEST", "CLM_TEST"),
mask_feature=None,
average_over=4,
dilation_size=2,
mono_threshold=0.4,
)
eop_clm = add_tcm(test_eopatch)
assert_array_equal(eop_clm.mask["CLM_TEST"], test_eopatch.mask["CLM_S2C"])
assert_array_equal(eop_clm.data["CLP_TEST"], test_eopatch.data["CLP_S2C"])
def test_multi_temporal_cloud_detection_downscaled(test_eopatch):
add_tcm = CloudMaskTask(
data_feature=(FeatureType.DATA, "BANDS-S2-L1C"),
processing_resolution=120,
mono_features=("CLP_TEST", "CLM_TEST"),
multi_features=("CLP_MULTI_TEST", "CLM_MULTI_TEST"),
mask_feature=(FeatureType.MASK, "CLM_INTERSSIM_TEST"),
average_over=8,
dilation_size=4,
)
eop_clm = add_tcm(test_eopatch)
# Check shape and type
for feature in ((FeatureType.MASK, "CLM_TEST"), (FeatureType.DATA, "CLP_TEST")):
assert eop_clm[feature].ndim == 4
assert eop_clm[feature].shape[:-1] == eop_clm.data["BANDS-S2-L1C"].shape[:-1]
assert eop_clm[feature].shape[-1] == 1
assert eop_clm.mask["CLM_TEST"].dtype == bool
assert eop_clm.data["CLP_TEST"].dtype == np.float32
# Compare mean cloud coverage with provided reference
assert np.mean(eop_clm.mask["CLM_TEST"]) == pytest.approx(np.mean(eop_clm.mask["CLM_S2C"]), abs=0.01)
assert np.mean(eop_clm.data["CLP_TEST"]) == pytest.approx(np.mean(eop_clm.data["CLP_S2C"]), abs=0.01)
# Check if most of the same times are flagged as cloudless
cloudless = np.mean(eop_clm.mask["CLM_TEST"], axis=(1, 2, 3)) == 0
assert np.mean(cloudless == eop_clm.label["IS_CLOUDLESS"][:, 0]) > 0.94
# Check multi-temporal results and final mask
assert_array_equal(eop_clm.data["CLP_MULTI_TEST"], test_eopatch.data["CLP_MULTI"])
assert_array_equal(eop_clm.mask["CLM_MULTI_TEST"], test_eopatch.mask["CLM_MULTI"])
assert_array_equal(eop_clm.mask["CLM_INTERSSIM_TEST"], test_eopatch.mask["CLM_INTERSSIM"])
|
flexible
|
{
"blob_id": "b7d7b6c070f237f9ab59f3367417ecf2672fbaaf",
"index": 6437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',\n 'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (\n 100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,\n 314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,\n 56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2, \n 11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)\ndef test_window_indices_function(num_of_elements, middle_idx, window_size,\n expected_indices):\n min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,\n window_size)\n assert (min_idx, max_idx) == expected_indices\n test_list = list(range(num_of_elements))\n assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)\n\n\ndef test_mono_temporal_cloud_detection(test_eopatch):\n add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),\n all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),\n mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,\n average_over=4, dilation_size=2, mono_threshold=0.4)\n eop_clm = add_tcm(test_eopatch)\n assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])\n assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',\n 'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (\n 100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,\n 314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,\n 56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2, \n 11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)\ndef test_window_indices_function(num_of_elements, middle_idx, window_size,\n expected_indices):\n min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,\n window_size)\n assert (min_idx, max_idx) == expected_indices\n test_list = list(range(num_of_elements))\n assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)\n\n\ndef test_mono_temporal_cloud_detection(test_eopatch):\n add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),\n all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),\n mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,\n average_over=4, dilation_size=2, mono_threshold=0.4)\n eop_clm = add_tcm(test_eopatch)\n assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])\n assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])\n\n\ndef test_multi_temporal_cloud_detection_downscaled(test_eopatch):\n add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),\n processing_resolution=120, mono_features=('CLP_TEST', 'CLM_TEST'),\n multi_features=('CLP_MULTI_TEST', 'CLM_MULTI_TEST'), mask_feature=(\n FeatureType.MASK, 'CLM_INTERSSIM_TEST'), average_over=8,\n dilation_size=4)\n eop_clm = add_tcm(test_eopatch)\n for feature in ((FeatureType.MASK, 'CLM_TEST'), (FeatureType.DATA,\n 'CLP_TEST')):\n assert eop_clm[feature].ndim == 4\n assert eop_clm[feature].shape[:-1] == eop_clm.data['BANDS-S2-L1C'\n ].shape[:-1]\n assert eop_clm[feature].shape[-1] == 1\n assert eop_clm.mask['CLM_TEST'].dtype == bool\n assert 
eop_clm.data['CLP_TEST'].dtype == np.float32\n assert np.mean(eop_clm.mask['CLM_TEST']) == pytest.approx(np.mean(\n eop_clm.mask['CLM_S2C']), abs=0.01)\n assert np.mean(eop_clm.data['CLP_TEST']) == pytest.approx(np.mean(\n eop_clm.data['CLP_S2C']), abs=0.01)\n cloudless = np.mean(eop_clm.mask['CLM_TEST'], axis=(1, 2, 3)) == 0\n assert np.mean(cloudless == eop_clm.label['IS_CLOUDLESS'][:, 0]) > 0.94\n assert_array_equal(eop_clm.data['CLP_MULTI_TEST'], test_eopatch.data[\n 'CLP_MULTI'])\n assert_array_equal(eop_clm.mask['CLM_MULTI_TEST'], test_eopatch.mask[\n 'CLM_MULTI'])\n assert_array_equal(eop_clm.mask['CLM_INTERSSIM_TEST'], test_eopatch.\n mask['CLM_INTERSSIM'])\n",
"step-4": "<mask token>\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\nfrom eolearn.core import FeatureType\nfrom eolearn.mask import CloudMaskTask\nfrom eolearn.mask.cloud_mask import _get_window_indices\n\n\n@pytest.mark.parametrize(('num_of_elements', 'middle_idx', 'window_size',\n 'expected_indices'), [(100, 0, 10, (0, 10)), (100, 1, 10, (0, 10)), (\n 100, 50, 10, (45, 55)), (271, 270, 10, (261, 271)), (314, 314, 10, (304,\n 314)), (100, 0, 11, (0, 11)), (100, 1, 11, (0, 11)), (100, 50, 11, (45,\n 56)), (271, 270, 11, (260, 271)), (314, 314, 11, (303, 314)), (11, 2, \n 11, (0, 11)), (11, 2, 33, (0, 11))], ids=str)\ndef test_window_indices_function(num_of_elements, middle_idx, window_size,\n expected_indices):\n min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx,\n window_size)\n assert (min_idx, max_idx) == expected_indices\n test_list = list(range(num_of_elements))\n assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)\n\n\ndef test_mono_temporal_cloud_detection(test_eopatch):\n add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),\n all_bands=True, is_data_feature=(FeatureType.MASK, 'IS_DATA'),\n mono_features=('CLP_TEST', 'CLM_TEST'), mask_feature=None,\n average_over=4, dilation_size=2, mono_threshold=0.4)\n eop_clm = add_tcm(test_eopatch)\n assert_array_equal(eop_clm.mask['CLM_TEST'], test_eopatch.mask['CLM_S2C'])\n assert_array_equal(eop_clm.data['CLP_TEST'], test_eopatch.data['CLP_S2C'])\n\n\ndef test_multi_temporal_cloud_detection_downscaled(test_eopatch):\n add_tcm = CloudMaskTask(data_feature=(FeatureType.DATA, 'BANDS-S2-L1C'),\n processing_resolution=120, mono_features=('CLP_TEST', 'CLM_TEST'),\n multi_features=('CLP_MULTI_TEST', 'CLM_MULTI_TEST'), mask_feature=(\n FeatureType.MASK, 'CLM_INTERSSIM_TEST'), average_over=8,\n dilation_size=4)\n eop_clm = add_tcm(test_eopatch)\n for feature in ((FeatureType.MASK, 'CLM_TEST'), (FeatureType.DATA,\n 'CLP_TEST')):\n 
assert eop_clm[feature].ndim == 4\n assert eop_clm[feature].shape[:-1] == eop_clm.data['BANDS-S2-L1C'\n ].shape[:-1]\n assert eop_clm[feature].shape[-1] == 1\n assert eop_clm.mask['CLM_TEST'].dtype == bool\n assert eop_clm.data['CLP_TEST'].dtype == np.float32\n assert np.mean(eop_clm.mask['CLM_TEST']) == pytest.approx(np.mean(\n eop_clm.mask['CLM_S2C']), abs=0.01)\n assert np.mean(eop_clm.data['CLP_TEST']) == pytest.approx(np.mean(\n eop_clm.data['CLP_S2C']), abs=0.01)\n cloudless = np.mean(eop_clm.mask['CLM_TEST'], axis=(1, 2, 3)) == 0\n assert np.mean(cloudless == eop_clm.label['IS_CLOUDLESS'][:, 0]) > 0.94\n assert_array_equal(eop_clm.data['CLP_MULTI_TEST'], test_eopatch.data[\n 'CLP_MULTI'])\n assert_array_equal(eop_clm.mask['CLM_MULTI_TEST'], test_eopatch.mask[\n 'CLM_MULTI'])\n assert_array_equal(eop_clm.mask['CLM_INTERSSIM_TEST'], test_eopatch.\n mask['CLM_INTERSSIM'])\n",
"step-5": "\"\"\"\nCopyright (c) 2017- Sinergise and contributors\nFor the full list of contributors, see the CREDITS file in the root directory of this source tree.\n\nThis source code is licensed under the MIT license, see the LICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom eolearn.core import FeatureType\nfrom eolearn.mask import CloudMaskTask\nfrom eolearn.mask.cloud_mask import _get_window_indices\n\n\n@pytest.mark.parametrize(\n (\"num_of_elements\", \"middle_idx\", \"window_size\", \"expected_indices\"),\n [\n (100, 0, 10, (0, 10)),\n (100, 1, 10, (0, 10)),\n (100, 50, 10, (45, 55)),\n (271, 270, 10, (261, 271)),\n (314, 314, 10, (304, 314)),\n (100, 0, 11, (0, 11)),\n (100, 1, 11, (0, 11)),\n (100, 50, 11, (45, 56)),\n (271, 270, 11, (260, 271)),\n (314, 314, 11, (303, 314)),\n (11, 2, 11, (0, 11)),\n (11, 2, 33, (0, 11)),\n ],\n ids=str,\n)\ndef test_window_indices_function(num_of_elements, middle_idx, window_size, expected_indices):\n min_idx, max_idx = _get_window_indices(num_of_elements, middle_idx, window_size)\n assert (min_idx, max_idx) == expected_indices\n\n test_list = list(range(num_of_elements))\n assert len(test_list[min_idx:max_idx]) == min(num_of_elements, window_size)\n\n\ndef test_mono_temporal_cloud_detection(test_eopatch):\n add_tcm = CloudMaskTask(\n data_feature=(FeatureType.DATA, \"BANDS-S2-L1C\"),\n all_bands=True,\n is_data_feature=(FeatureType.MASK, \"IS_DATA\"),\n mono_features=(\"CLP_TEST\", \"CLM_TEST\"),\n mask_feature=None,\n average_over=4,\n dilation_size=2,\n mono_threshold=0.4,\n )\n eop_clm = add_tcm(test_eopatch)\n\n assert_array_equal(eop_clm.mask[\"CLM_TEST\"], test_eopatch.mask[\"CLM_S2C\"])\n assert_array_equal(eop_clm.data[\"CLP_TEST\"], test_eopatch.data[\"CLP_S2C\"])\n\n\ndef test_multi_temporal_cloud_detection_downscaled(test_eopatch):\n add_tcm = CloudMaskTask(\n data_feature=(FeatureType.DATA, 
\"BANDS-S2-L1C\"),\n processing_resolution=120,\n mono_features=(\"CLP_TEST\", \"CLM_TEST\"),\n multi_features=(\"CLP_MULTI_TEST\", \"CLM_MULTI_TEST\"),\n mask_feature=(FeatureType.MASK, \"CLM_INTERSSIM_TEST\"),\n average_over=8,\n dilation_size=4,\n )\n eop_clm = add_tcm(test_eopatch)\n\n # Check shape and type\n for feature in ((FeatureType.MASK, \"CLM_TEST\"), (FeatureType.DATA, \"CLP_TEST\")):\n assert eop_clm[feature].ndim == 4\n assert eop_clm[feature].shape[:-1] == eop_clm.data[\"BANDS-S2-L1C\"].shape[:-1]\n assert eop_clm[feature].shape[-1] == 1\n assert eop_clm.mask[\"CLM_TEST\"].dtype == bool\n assert eop_clm.data[\"CLP_TEST\"].dtype == np.float32\n\n # Compare mean cloud coverage with provided reference\n assert np.mean(eop_clm.mask[\"CLM_TEST\"]) == pytest.approx(np.mean(eop_clm.mask[\"CLM_S2C\"]), abs=0.01)\n assert np.mean(eop_clm.data[\"CLP_TEST\"]) == pytest.approx(np.mean(eop_clm.data[\"CLP_S2C\"]), abs=0.01)\n\n # Check if most of the same times are flagged as cloudless\n cloudless = np.mean(eop_clm.mask[\"CLM_TEST\"], axis=(1, 2, 3)) == 0\n assert np.mean(cloudless == eop_clm.label[\"IS_CLOUDLESS\"][:, 0]) > 0.94\n\n # Check multi-temporal results and final mask\n assert_array_equal(eop_clm.data[\"CLP_MULTI_TEST\"], test_eopatch.data[\"CLP_MULTI\"])\n assert_array_equal(eop_clm.mask[\"CLM_MULTI_TEST\"], test_eopatch.mask[\"CLM_MULTI\"])\n assert_array_equal(eop_clm.mask[\"CLM_INTERSSIM_TEST\"], test_eopatch.mask[\"CLM_INTERSSIM\"])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import pytest
import json
import os.path
import importlib
import jsonpickle
from fixture.application import Application
# Module-level singletons shared across the whole test session:
# ``fixture`` caches the Application instance created lazily by the ``app``
# fixture; ``config`` caches the JSON configuration parsed on first use.
fixture = None
config = None
@pytest.fixture
def app(request):
    """Return a logged-in Application, creating or reviving it as needed.

    The configuration file (``--config``) is loaded once per session and
    cached in the module-level ``config``; the Application itself is cached
    in the module-level ``fixture`` and recreated only when invalid.
    """
    global fixture
    global config
    browser = request.config.getoption("--browser")
    if config is None:
        # Resolve the config file relative to this conftest.py.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        config_path = os.path.join(base_dir, request.config.getoption("--config"))
        with open(config_path) as fh:
            config = json.load(fh)
    if fixture is None or not fixture.is_valid():
        fixture = Application(browser=browser, base_url=config["baseUrl"])

    # Every test starts from a logged-in session.
    fixture.session.ensure_login(name=config["login"], pwd=config["password"])
    return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
    """Session-wide teardown: log out and destroy the shared Application."""
    global fixture

    def teardown():
        fixture.session.ensure_logout()
        fixture.destroy()

    request.addfinalizer(teardown)
    return fixture
def pytest_addoption(parser):
    """Register the command-line options used by this test suite."""
    for opt, default in (("--browser", "firefox"), ("--config", "config.json")):
        parser.addoption(opt, action="store", default=default)
def pytest_generate_tests(metafunc):
    """Parametrize ``data_*`` / ``json_*`` fixtures with externally loaded data.

    ``data_<name>`` fixtures are fed from ``data/<name>.py`` (its ``testdata``
    attribute); ``json_<name>`` fixtures are fed from ``data/<name>.json``
    (decoded with jsonpickle). Test ids are the ``repr`` of each case.
    """
    # NOTE: the loop variable was previously named ``fixture``, shadowing the
    # module-level ``fixture`` global; renamed to avoid confusion.
    for fixture_name in metafunc.fixturenames:
        if fixture_name.startswith("data_"):
            testdata = load_from_module(fixture_name[5:])
        elif fixture_name.startswith("json_"):
            testdata = load_from_json(fixture_name[5:])
        else:
            continue
        metafunc.parametrize(fixture_name, testdata, ids=[repr(case) for case in testdata])
def load_from_module(module):
    """Return the ``testdata`` attribute of the ``data.<module>`` module."""
    mod = importlib.import_module("data.%s" % module)
    return mod.testdata
def load_from_json(jsonfile):
    """Decode test data from ``data/<jsonfile>.json`` next to this file."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(base_dir, f'data/{jsonfile}.json')) as fh:
        raw = fh.read()
    return jsonpickle.decode(raw)
|
normal
|
{
"blob_id": "0c0fb3bfb81be5ef6a60584eafeefec61f171679",
"index": 9124,
"step-1": "<mask token>\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\n<mask token>\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\n<mask token>\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default='firefox')\n parser.addoption('--config', action='store', default='config.json')\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-4": "<mask token>\nfixture = None\nconfig = None\n\n\n@pytest.fixture\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption('--browser')\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(\n __file__)), request.config.getoption('--config'))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config['baseUrl'])\n fixture.session.ensure_login(name=config['login'], pwd=config['password'])\n return fixture\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption('--browser', action='store', default='firefox')\n parser.addoption('--config', action='store', default='config.json')\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith('data_'):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n elif fixture.startswith('json_'):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in\n testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-5": "import pytest\nimport json\nimport os.path\nimport importlib\nimport jsonpickle\nfrom fixture.application import Application\n\n\nfixture = None\nconfig = None\n\n\n@pytest.fixture\ndef app(request):\n global fixture\n global config\n browser = request.config.getoption(\"--browser\")\n if config is None:\n conf_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), request.config.getoption(\"--config\"))\n with open(conf_file_path) as config_file:\n config = json.load(config_file)\n if fixture is None or not fixture.is_valid():\n fixture = Application(browser=browser, base_url=config[\"baseUrl\"])\n\n fixture.session.ensure_login(name=config[\"login\"], pwd=config[\"password\"])\n\n return fixture\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef stop(request):\n global fixture\n\n def finalizer():\n fixture.session.ensure_logout()\n fixture.destroy()\n\n request.addfinalizer(finalizer)\n return fixture\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\", action=\"store\", default=\"firefox\")\n parser.addoption(\"--config\", action=\"store\", default=\"config.json\")\n\n\ndef pytest_generate_tests(metafunc):\n for fixture in metafunc.fixturenames:\n if fixture.startswith(\"data_\"):\n testdata = load_from_module(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in testdata])\n elif fixture.startswith(\"json_\"):\n testdata = load_from_json(fixture[5:])\n metafunc.parametrize(fixture, testdata, ids=[repr(g) for g in testdata])\n\n\ndef load_from_module(module):\n return importlib.import_module(f'data.{module}').testdata\n\n\ndef load_from_json(jsonfile):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), f'data/{jsonfile}.json')) as file:\n return jsonpickle.decode(file.read())\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
import random
import time
from typing import Dict, List, Optional
from bemani.client.base import BaseClient
from bemani.protocol import Node
class ReflecBeatColette(BaseClient):
NAME = 'TEST'
def verify_pcb_boot(self, loc: str) -> None:
call = self.call_node()
pcb = Node.void('pcb')
pcb.set_attribute('method', 'boot')
pcb.add_child(Node.string('lid', loc))
call.add_child(pcb)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/pcb/sinfo/nm")
self.assert_path(resp, "response/pcb/sinfo/cl_enbl")
self.assert_path(resp, "response/pcb/sinfo/cl_h")
self.assert_path(resp, "response/pcb/sinfo/cl_m")
def verify_info_common(self) -> None:
call = self.call_node()
info = Node.void('info')
info.set_attribute('method', 'common')
call.add_child(info)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/info/event_ctrl")
self.assert_path(resp, "response/info/item_lock_ctrl")
def verify_info_ranking(self) -> None:
call = self.call_node()
info = Node.void('info')
info.set_attribute('method', 'ranking')
info.add_child(Node.s32('ver', 0))
call.add_child(info)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/info/ver")
self.assert_path(resp, "response/info/ranking/weekly/bt")
self.assert_path(resp, "response/info/ranking/weekly/et")
self.assert_path(resp, "response/info/ranking/weekly/new/d/mid")
self.assert_path(resp, "response/info/ranking/weekly/new/d/cnt")
self.assert_path(resp, "response/info/ranking/monthly/bt")
self.assert_path(resp, "response/info/ranking/monthly/et")
self.assert_path(resp, "response/info/ranking/monthly/new/d/mid")
self.assert_path(resp, "response/info/ranking/monthly/new/d/cnt")
self.assert_path(resp, "response/info/ranking/total/bt")
self.assert_path(resp, "response/info/ranking/total/et")
self.assert_path(resp, "response/info/ranking/total/new/d/mid")
self.assert_path(resp, "response/info/ranking/total/new/d/cnt")
def verify_player_start(self, refid: str) -> None:
call = self.call_node()
player = Node.void('player')
player.set_attribute('method', 'start')
player.add_child(Node.string('rid', refid))
player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))
player.add_child(Node.u16('gp', 10573))
player.add_child(Node.u8_array('la', [16, 0, 0, 0]))
call.add_child(player)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player/plyid")
self.assert_path(resp, "response/player/start_time")
self.assert_path(resp, "response/player/event_ctrl")
self.assert_path(resp, "response/player/item_lock_ctrl")
self.assert_path(resp, "response/player/lincle_link_4")
self.assert_path(resp, "response/player/jbrbcollabo")
self.assert_path(resp, "response/player/tricolettepark")
def verify_player_delete(self, refid: str) -> None:
call = self.call_node()
player = Node.void('player')
player.set_attribute('method', 'delete')
player.add_child(Node.string('rid', refid))
call.add_child(player)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player")
def verify_player_end(self, refid: str) -> None:
call = self.call_node()
player = Node.void('player')
player.set_attribute('method', 'end')
player.add_child(Node.string('rid', refid))
call.add_child(player)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player")
def verify_player_succeed(self, refid: str) -> None:
call = self.call_node()
player = Node.void('player')
player.set_attribute('method', 'succeed')
player.add_child(Node.string('rid', refid))
call.add_child(player)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player/name")
self.assert_path(resp, "response/player/lv")
self.assert_path(resp, "response/player/exp")
self.assert_path(resp, "response/player/grd")
self.assert_path(resp, "response/player/ap")
self.assert_path(resp, "response/player/released")
self.assert_path(resp, "response/player/mrecord")
def verify_player_read(self, refid: str, location: str) -> List[Dict[str, int]]:
call = self.call_node()
player = Node.void('player')
player.set_attribute('method', 'read')
player.add_child(Node.string('rid', refid))
player.add_child(Node.string('lid', location))
player.add_child(Node.s16('ver', 5))
call.add_child(player)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player/pdata/account/usrid")
self.assert_path(resp, "response/player/pdata/account/tpc")
self.assert_path(resp, "response/player/pdata/account/dpc")
self.assert_path(resp, "response/player/pdata/account/crd")
self.assert_path(resp, "response/player/pdata/account/brd")
self.assert_path(resp, "response/player/pdata/account/tdc")
self.assert_path(resp, "response/player/pdata/account/intrvld")
self.assert_path(resp, "response/player/pdata/account/ver")
self.assert_path(resp, "response/player/pdata/account/pst")
self.assert_path(resp, "response/player/pdata/account/st")
self.assert_path(resp, "response/player/pdata/base/name")
self.assert_path(resp, "response/player/pdata/base/exp")
self.assert_path(resp, "response/player/pdata/base/lv")
self.assert_path(resp, "response/player/pdata/base/mg")
self.assert_path(resp, "response/player/pdata/base/ap")
self.assert_path(resp, "response/player/pdata/base/tid")
self.assert_path(resp, "response/player/pdata/base/tname")
self.assert_path(resp, "response/player/pdata/base/cmnt")
self.assert_path(resp, "response/player/pdata/base/uattr")
self.assert_path(resp, "response/player/pdata/base/hidden_param")
self.assert_path(resp, "response/player/pdata/base/tbs")
self.assert_path(resp, "response/player/pdata/base/tbs_r")
self.assert_path(resp, "response/player/pdata/rival")
self.assert_path(resp, "response/player/pdata/fav_music_slot")
self.assert_path(resp, "response/player/pdata/custom")
self.assert_path(resp, "response/player/pdata/config")
self.assert_path(resp, "response/player/pdata/stamp")
self.assert_path(resp, "response/player/pdata/released")
self.assert_path(resp, "response/player/pdata/record")
if resp.child_value('player/pdata/base/name') != self.NAME:
raise Exception('Invalid name {} returned on profile read!'.format(resp.child_value('player/pdata/base/name')))
scores = []
for child in resp.child('player/pdata/record').children:
if child.name != 'rec':
continue
score = {
'id': child.child_value('mid'),
'chart': child.child_value('ntgrd'),
'clear_type': child.child_value('ct'),
'achievement_rate': child.child_value('ar'),
'score': child.child_value('scr'),
'combo': child.child_value('cmb'),
'miss_count': child.child_value('ms'),
}
scores.append(score)
return scores
def verify_player_write(self, refid: str, loc: str, scores: List[Dict[str, int]]) -> int:
call = self.call_node()
player = Node.void('player')
call.add_child(player)
player.set_attribute('method', 'write')
pdata = Node.void('pdata')
player.add_child(pdata)
account = Node.void('account')
pdata.add_child(account)
account.add_child(Node.s32('usrid', 0))
account.add_child(Node.s32('plyid', 0))
account.add_child(Node.s32('tpc', 1))
account.add_child(Node.s32('dpc', 1))
account.add_child(Node.s32('crd', 1))
account.add_child(Node.s32('brd', 1))
account.add_child(Node.s32('tdc', 1))
account.add_child(Node.string('rid', refid))
account.add_child(Node.string('lid', loc))
account.add_child(Node.u8('mode', 0))
account.add_child(Node.s16('ver', 5))
account.add_child(Node.bool('pp', True))
account.add_child(Node.bool('ps', True))
account.add_child(Node.s16('pay', 0))
account.add_child(Node.s16('pay_pc', 0))
account.add_child(Node.u64('st', int(time.time() * 1000)))
base = Node.void('base')
pdata.add_child(base)
base.add_child(Node.string('name', self.NAME))
base.add_child(Node.s32('exp', 0))
base.add_child(Node.s32('lv', 1))
base.add_child(Node.s32('mg', -1))
base.add_child(Node.s32('ap', -1))
base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
base.add_child(Node.bool('is_tut', True))
stglog = Node.void('stglog')
pdata.add_child(stglog)
index = 0
for score in scores:
log = Node.void('log')
stglog.add_child(log)
log.add_child(Node.s8('stg', index))
log.add_child(Node.s16('mid', score['id']))
log.add_child(Node.s8('ng', score['chart']))
log.add_child(Node.s8('col', 0))
log.add_child(Node.s8('mt', 7))
log.add_child(Node.s8('rt', 0))
log.add_child(Node.s8('ct', score['clear_type']))
log.add_child(Node.s16('grd', 0))
log.add_child(Node.s16('ar', score['achievement_rate']))
log.add_child(Node.s16('sc', score['score']))
log.add_child(Node.s16('jt_jst', 0))
log.add_child(Node.s16('jt_grt', 0))
log.add_child(Node.s16('jt_gd', 0))
log.add_child(Node.s16('jt_ms', score['miss_count']))
log.add_child(Node.s16('jt_jr', 0))
log.add_child(Node.s16('cmb', score['combo']))
log.add_child(Node.s16('exp', 0))
log.add_child(Node.s32('r_uid', 0))
log.add_child(Node.s32('r_plyid', 0))
log.add_child(Node.s8('r_stg', 0))
log.add_child(Node.s8('r_ct', -1))
log.add_child(Node.s16('r_sc', 0))
log.add_child(Node.s16('r_grd', 0))
log.add_child(Node.s16('r_ar', 0))
log.add_child(Node.s8('r_cpuid', -1))
log.add_child(Node.s32('time', int(time.time())))
log.add_child(Node.s8('decide', 0))
index = index + 1
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/player/uid")
return resp.child_value('player/uid')
def verify_lobby_read(self, location: str, extid: int) -> None:
call = self.call_node()
lobby = Node.void('lobby')
lobby.set_attribute('method', 'read')
lobby.add_child(Node.s32('uid', extid))
lobby.add_child(Node.u8('m_grade', 255))
lobby.add_child(Node.string('lid', location))
lobby.add_child(Node.s32('max', 128))
lobby.add_child(Node.s32_array('friend', []))
lobby.add_child(Node.u8('var', 5))
call.add_child(lobby)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/lobby/interval")
self.assert_path(resp, "response/lobby/interval_p")
def verify_lobby_entry(self, location: str, extid: int) -> int:
call = self.call_node()
lobby = Node.void('lobby')
lobby.set_attribute('method', 'entry')
e = Node.void('e')
lobby.add_child(e)
e.add_child(Node.s32('eid', 0))
e.add_child(Node.u16('mid', 79))
e.add_child(Node.u8('ng', 0))
e.add_child(Node.s32('uid', extid))
e.add_child(Node.s32('uattr', 0))
e.add_child(Node.string('pn', self.NAME))
e.add_child(Node.s16('mg', 255))
e.add_child(Node.s32('mopt', 0))
e.add_child(Node.s32('tid', 0))
e.add_child(Node.string('tn', ''))
e.add_child(Node.s32('topt', 0))
e.add_child(Node.string('lid', location))
e.add_child(Node.string('sn', ''))
e.add_child(Node.u8('pref', 51))
e.add_child(Node.s8('stg', 4))
e.add_child(Node.s8('pside', 0))
e.add_child(Node.s16('eatime', 30))
e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))
e.add_child(Node.u16('gp', 10007))
e.add_child(Node.u8_array('la', [16, 0, 0, 0]))
e.add_child(Node.u8('ver', 5))
lobby.add_child(Node.s32_array('friend', []))
call.add_child(lobby)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/lobby/interval")
self.assert_path(resp, "response/lobby/interval_p")
self.assert_path(resp, "response/lobby/eid")
self.assert_path(resp, "response/lobby/e/eid")
self.assert_path(resp, "response/lobby/e/mid")
self.assert_path(resp, "response/lobby/e/ng")
self.assert_path(resp, "response/lobby/e/uid")
self.assert_path(resp, "response/lobby/e/uattr")
self.assert_path(resp, "response/lobby/e/pn")
self.assert_path(resp, "response/lobby/e/mg")
self.assert_path(resp, "response/lobby/e/mopt")
self.assert_path(resp, "response/lobby/e/tid")
self.assert_path(resp, "response/lobby/e/tn")
self.assert_path(resp, "response/lobby/e/topt")
self.assert_path(resp, "response/lobby/e/lid")
self.assert_path(resp, "response/lobby/e/sn")
self.assert_path(resp, "response/lobby/e/pref")
self.assert_path(resp, "response/lobby/e/stg")
self.assert_path(resp, "response/lobby/e/pside")
self.assert_path(resp, "response/lobby/e/eatime")
self.assert_path(resp, "response/lobby/e/ga")
self.assert_path(resp, "response/lobby/e/gp")
self.assert_path(resp, "response/lobby/e/la")
self.assert_path(resp, "response/lobby/e/ver")
return resp.child_value('lobby/eid')
def verify_lobby_delete(self, eid: int) -> None:
call = self.call_node()
lobby = Node.void('lobby')
lobby.set_attribute('method', 'delete')
lobby.add_child(Node.s32('eid', eid))
call.add_child(lobby)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/lobby")
def verify_pzlcmt_read(self, extid: int) -> None:
call = self.call_node()
info = Node.void('info')
info.set_attribute('method', 'pzlcmt_read')
info.add_child(Node.s32('uid', extid))
info.add_child(Node.s32('tid', 0))
info.add_child(Node.s32('time', 0))
info.add_child(Node.s32('limit', 30))
call.add_child(info)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/info/comment/time")
self.assert_path(resp, "response/info/c/uid")
self.assert_path(resp, "response/info/c/name")
self.assert_path(resp, "response/info/c/icon")
self.assert_path(resp, "response/info/c/bln")
self.assert_path(resp, "response/info/c/tid")
self.assert_path(resp, "response/info/c/t_name")
self.assert_path(resp, "response/info/c/pref")
self.assert_path(resp, "response/info/c/time")
self.assert_path(resp, "response/info/c/comment")
self.assert_path(resp, "response/info/c/is_tweet")
# Verify we posted our comment earlier
found = False
for child in resp.child('info').children:
if child.name != 'c':
continue
if child.child_value('uid') == extid:
name = child.child_value('name')
comment = child.child_value('comment')
if name != self.NAME:
raise Exception('Invalid name \'{}\' returned for comment!'.format(name))
if comment != 'アメ〜〜!':
raise Exception('Invalid comment \'{}\' returned for comment!'.format(comment))
found = True
if not found:
raise Exception('Comment we posted was not found!')
def verify_pzlcmt_write(self, extid: int) -> None:
call = self.call_node()
info = Node.void('info')
info.set_attribute('method', 'pzlcmt_write')
info.add_child(Node.s32('uid', extid))
info.add_child(Node.string('name', self.NAME))
info.add_child(Node.s16('icon', 0))
info.add_child(Node.s8('bln', 0))
info.add_child(Node.s32('tid', 0))
info.add_child(Node.string('t_name', ''))
info.add_child(Node.s8('pref', 51))
info.add_child(Node.s32('time', int(time.time())))
info.add_child(Node.string('comment', 'アメ〜〜!'))
info.add_child(Node.bool('is_tweet', True))
call.add_child(info)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/info")
def verify_jbrbcollabo_save(self, refid: str) -> None:
call = self.call_node()
jbrbcollabo = Node.void('jbrbcollabo')
jbrbcollabo.set_attribute('method', 'save')
jbrbcollabo.add_child(Node.string('ref_id', refid))
jbrbcollabo.add_child(Node.u16('cre_count', 0))
call.add_child(jbrbcollabo)
# Swap with server
resp = self.exchange('', call)
# Verify that response is correct
self.assert_path(resp, "response/jbrbcollabo")
    def verify(self, cardid: Optional[str]) -> None:
        """Run the full end-to-end client check against the server.

        Boots the PCB, registers (or looks up) a card, exercises profile
        read/write, lobby, puzzle comments, the Jubeat/ReflecBeat collabo
        hook, rankings and finally PASELI, raising on any mismatch.

        Args:
            cardid: An existing card ID to test with, or None to generate
                a random card and additionally run score write/read checks.
        """
        # Verify boot sequence is okay
        self.verify_services_get(
            expected_services=[
                'pcbtracker',
                'pcbevent',
                'local',
                'message',
                'facility',
                'cardmng',
                'package',
                'posevent',
                'pkglist',
                'dlstatus',
                'eacoin',
                'lobby',
                'ntp',
                'keepalive'
            ]
        )
        paseli_enabled = self.verify_pcbtracker_alive()
        self.verify_message_get()
        self.verify_package_list()
        location = self.verify_facility_get()
        self.verify_pcbevent_put()
        self.verify_pcb_boot(location)
        self.verify_info_common()
        # Verify card registration and profile lookup
        if cardid is not None:
            card = cardid
        else:
            card = self.random_card()
            print("Generated random card ID {} for use.".format(card))
        if cardid is None:
            self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled)
            ref_id = self.verify_cardmng_getrefid(card)
            if len(ref_id) != 16:
                raise Exception('Invalid refid \'{}\' returned when registering card'.format(ref_id))
            if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled):
                raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
            # Always get a player start, regardless of new profile or not
            self.verify_player_start(ref_id)
            self.verify_player_delete(ref_id)
            self.verify_player_succeed(ref_id)
            extid = self.verify_player_write(
                ref_id,
                location,
                [{
                    'id': 0,
                    'chart': 0,
                    'clear_type': -1,
                    'achievement_rate': 0,
                    'score': 0,
                    'combo': 0,
                    'miss_count': 0,
                }]
            )
        else:
            print("Skipping new card checks for existing card")
            ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled)
        # Verify pin handling and return card handling
        self.verify_cardmng_authpass(ref_id, correct=True)
        self.verify_cardmng_authpass(ref_id, correct=False)
        if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled):
            raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id))
        # Verify lobby functionality
        # NOTE(review): 'extid' is only assigned in the cardid-is-None branch above;
        # when an existing card ID is passed in, the lines below will raise
        # NameError. Confirm whether the existing-card path is ever expected to
        # reach the lobby/comment checks.
        self.verify_lobby_read(location, extid)
        eid = self.verify_lobby_entry(location, extid)
        self.verify_lobby_delete(eid)
        # Verify puzzle comment read and write
        self.verify_pzlcmt_write(extid)
        self.verify_pzlcmt_read(extid)
        # Verify Jubeat/ReflecBeat collabo save
        self.verify_jbrbcollabo_save(ref_id)
        if cardid is None:
            # Verify score saving and updating. Phase 1 establishes scores,
            # phase 2 improves some and worsens others (the 'expected_*' keys
            # assert that a worse play does not overwrite a better record).
            for phase in [1, 2]:
                if phase == 1:
                    dummyscores = [
                        # An okay score on a chart
                        {
                            'id': 1,
                            'chart': 1,
                            'clear_type': 2,
                            'achievement_rate': 7543,
                            'score': 432,
                            'combo': 123,
                            'miss_count': 5,
                        },
                        # A good score on an easier chart of the same song
                        {
                            'id': 1,
                            'chart': 0,
                            'clear_type': 4,
                            'achievement_rate': 9876,
                            'score': 543,
                            'combo': 543,
                            'miss_count': 0,
                        },
                        # A bad score on a hard chart
                        {
                            'id': 3,
                            'chart': 2,
                            'clear_type': 2,
                            'achievement_rate': 1234,
                            'score': 123,
                            'combo': 42,
                            'miss_count': 54,
                        },
                        # A terrible score on an easy chart
                        {
                            'id': 3,
                            'chart': 0,
                            'clear_type': 2,
                            'achievement_rate': 1024,
                            'score': 50,
                            'combo': 12,
                            'miss_count': 90,
                        },
                    ]
                if phase == 2:
                    dummyscores = [
                        # A better score on the same chart
                        {
                            'id': 1,
                            'chart': 1,
                            'clear_type': 3,
                            'achievement_rate': 8765,
                            'score': 469,
                            'combo': 468,
                            'miss_count': 1,
                        },
                        # A worse score on another same chart
                        {
                            'id': 1,
                            'chart': 0,
                            'clear_type': 2,
                            'achievement_rate': 8765,
                            'score': 432,
                            'combo': 321,
                            'miss_count': 15,
                            'expected_score': 543,
                            'expected_clear_type': 4,
                            'expected_achievement_rate': 9876,
                            'expected_combo': 543,
                            'expected_miss_count': 0,
                        },
                    ]
                self.verify_player_write(ref_id, location, dummyscores)
                scores = self.verify_player_read(ref_id, location)
                for expected in dummyscores:
                    # Find the record matching this song/chart pair.
                    actual = None
                    for received in scores:
                        if received['id'] == expected['id'] and received['chart'] == expected['chart']:
                            actual = received
                            break
                    if actual is None:
                        raise Exception("Didn't find song {} chart {} in response!".format(expected['id'], expected['chart']))
                    # Fall back to the played values when no explicit expectation is given.
                    if 'expected_score' in expected:
                        expected_score = expected['expected_score']
                    else:
                        expected_score = expected['score']
                    if 'expected_achievement_rate' in expected:
                        expected_achievement_rate = expected['expected_achievement_rate']
                    else:
                        expected_achievement_rate = expected['achievement_rate']
                    if 'expected_clear_type' in expected:
                        expected_clear_type = expected['expected_clear_type']
                    else:
                        expected_clear_type = expected['clear_type']
                    if 'expected_combo' in expected:
                        expected_combo = expected['expected_combo']
                    else:
                        expected_combo = expected['combo']
                    if 'expected_miss_count' in expected:
                        expected_miss_count = expected['expected_miss_count']
                    else:
                        expected_miss_count = expected['miss_count']
                    if actual['score'] != expected_score:
                        raise Exception('Expected a score of \'{}\' for song \'{}\' chart \'{}\' but got score \'{}\''.format(
                            expected_score, expected['id'], expected['chart'], actual['score'],
                        ))
                    if actual['achievement_rate'] != expected_achievement_rate:
                        raise Exception('Expected an achievement rate of \'{}\' for song \'{}\' chart \'{}\' but got achievement rate \'{}\''.format(
                            expected_achievement_rate, expected['id'], expected['chart'], actual['achievement_rate'],
                        ))
                    if actual['clear_type'] != expected_clear_type:
                        raise Exception('Expected a clear_type of \'{}\' for song \'{}\' chart \'{}\' but got clear_type \'{}\''.format(
                            expected_clear_type, expected['id'], expected['chart'], actual['clear_type'],
                        ))
                    if actual['combo'] != expected_combo:
                        raise Exception('Expected a combo of \'{}\' for song \'{}\' chart \'{}\' but got combo \'{}\''.format(
                            expected_combo, expected['id'], expected['chart'], actual['combo'],
                        ))
                    if actual['miss_count'] != expected_miss_count:
                        raise Exception('Expected a miss count of \'{}\' for song \'{}\' chart \'{}\' but got miss count \'{}\''.format(
                            expected_miss_count, expected['id'], expected['chart'], actual['miss_count'],
                        ))
                # Sleep so we don't end up putting in score history on the same second
                time.sleep(1)
        else:
            print("Skipping score checks for existing card")
        # Verify ending game
        self.verify_player_end(ref_id)
        # Verify high score tables
        self.verify_info_ranking()
        # Verify paseli handling
        if paseli_enabled:
            print("PASELI enabled for this PCBID, executing PASELI checks")
        else:
            print("PASELI disabled for this PCBID, skipping PASELI checks")
            return
        sessid, balance = self.verify_eacoin_checkin(card)
        if balance == 0:
            print("Skipping PASELI consume check because card has 0 balance")
        else:
            self.verify_eacoin_consume(sessid, balance, random.randint(0, balance))
        self.verify_eacoin_checkout(sessid)
|
normal
|
{
"blob_id": "f781377a52400abd617e7f0c5529726120b78476",
"index": 3426,
"step-1": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n <mask token>\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n <mask token>\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = 
Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n 
self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n 
account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 
0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 
'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 
'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n <mask token>\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n 
paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 
'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise 
Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n",
"step-2": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n <mask token>\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n 
player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 
'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n 
call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n 
log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 
'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 
'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n <mask token>\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, 
msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 
'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 
'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n",
"step-3": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n\n def verify_player_start(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n 
player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/plyid')\n self.assert_path(resp, 'response/player/start_time')\n self.assert_path(resp, 'response/player/event_ctrl')\n self.assert_path(resp, 'response/player/item_lock_ctrl')\n self.assert_path(resp, 'response/player/lincle_link_4')\n self.assert_path(resp, 'response/player/jbrbcollabo')\n self.assert_path(resp, 'response/player/tricolettepark')\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = 
self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n 
self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 
0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n 
e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = 
self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n 
info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info')\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = 
self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == 
expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n 
\"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n",
"step-4": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n NAME = 'TEST'\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n\n def verify_player_start(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n 
player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/plyid')\n self.assert_path(resp, 'response/player/start_time')\n self.assert_path(resp, 'response/player/event_ctrl')\n self.assert_path(resp, 'response/player/item_lock_ctrl')\n self.assert_path(resp, 'response/player/lincle_link_4')\n self.assert_path(resp, 'response/player/jbrbcollabo')\n self.assert_path(resp, 'response/player/tricolettepark')\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = 
self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n 
self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 
0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n\n def verify_lobby_read(self, location: str, extid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'read')\n lobby.add_child(Node.s32('uid', extid))\n lobby.add_child(Node.u8('m_grade', 255))\n lobby.add_child(Node.string('lid', location))\n lobby.add_child(Node.s32('max', 128))\n lobby.add_child(Node.s32_array('friend', []))\n 
lobby.add_child(Node.u8('var', 5))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n 
self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid 
name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info')\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n 
self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 
'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 
'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n",
"step-5": "import random\nimport time\nfrom typing import Dict, List, Optional\n\nfrom bemani.client.base import BaseClient\nfrom bemani.protocol import Node\n\n\nclass ReflecBeatColette(BaseClient):\n NAME = 'TEST'\n\n def verify_pcb_boot(self, loc: str) -> None:\n call = self.call_node()\n\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/pcb/sinfo/nm\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_enbl\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_h\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_m\")\n\n def verify_info_common(self) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/event_ctrl\")\n self.assert_path(resp, \"response/info/item_lock_ctrl\")\n\n def verify_info_ranking(self) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/ver\")\n self.assert_path(resp, \"response/info/ranking/weekly/bt\")\n self.assert_path(resp, \"response/info/ranking/weekly/et\")\n self.assert_path(resp, \"response/info/ranking/weekly/new/d/mid\")\n self.assert_path(resp, \"response/info/ranking/weekly/new/d/cnt\")\n self.assert_path(resp, \"response/info/ranking/monthly/bt\")\n self.assert_path(resp, \"response/info/ranking/monthly/et\")\n self.assert_path(resp, \"response/info/ranking/monthly/new/d/mid\")\n self.assert_path(resp, \"response/info/ranking/monthly/new/d/cnt\")\n self.assert_path(resp, 
\"response/info/ranking/total/bt\")\n self.assert_path(resp, \"response/info/ranking/total/et\")\n self.assert_path(resp, \"response/info/ranking/total/new/d/mid\")\n self.assert_path(resp, \"response/info/ranking/total/new/d/cnt\")\n\n def verify_player_start(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/plyid\")\n self.assert_path(resp, \"response/player/start_time\")\n self.assert_path(resp, \"response/player/event_ctrl\")\n self.assert_path(resp, \"response/player/item_lock_ctrl\")\n self.assert_path(resp, \"response/player/lincle_link_4\")\n self.assert_path(resp, \"response/player/jbrbcollabo\")\n self.assert_path(resp, \"response/player/tricolettepark\")\n\n def verify_player_delete(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player\")\n\n def verify_player_end(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player\")\n\n def verify_player_succeed(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 
'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/name\")\n self.assert_path(resp, \"response/player/lv\")\n self.assert_path(resp, \"response/player/exp\")\n self.assert_path(resp, \"response/player/grd\")\n self.assert_path(resp, \"response/player/ap\")\n self.assert_path(resp, \"response/player/released\")\n self.assert_path(resp, \"response/player/mrecord\")\n\n def verify_player_read(self, refid: str, location: str) -> List[Dict[str, int]]:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/pdata/account/usrid\")\n self.assert_path(resp, \"response/player/pdata/account/tpc\")\n self.assert_path(resp, \"response/player/pdata/account/dpc\")\n self.assert_path(resp, \"response/player/pdata/account/crd\")\n self.assert_path(resp, \"response/player/pdata/account/brd\")\n self.assert_path(resp, \"response/player/pdata/account/tdc\")\n self.assert_path(resp, \"response/player/pdata/account/intrvld\")\n self.assert_path(resp, \"response/player/pdata/account/ver\")\n self.assert_path(resp, \"response/player/pdata/account/pst\")\n self.assert_path(resp, \"response/player/pdata/account/st\")\n self.assert_path(resp, \"response/player/pdata/base/name\")\n self.assert_path(resp, \"response/player/pdata/base/exp\")\n self.assert_path(resp, \"response/player/pdata/base/lv\")\n self.assert_path(resp, \"response/player/pdata/base/mg\")\n self.assert_path(resp, \"response/player/pdata/base/ap\")\n self.assert_path(resp, \"response/player/pdata/base/tid\")\n 
self.assert_path(resp, \"response/player/pdata/base/tname\")\n self.assert_path(resp, \"response/player/pdata/base/cmnt\")\n self.assert_path(resp, \"response/player/pdata/base/uattr\")\n self.assert_path(resp, \"response/player/pdata/base/hidden_param\")\n self.assert_path(resp, \"response/player/pdata/base/tbs\")\n self.assert_path(resp, \"response/player/pdata/base/tbs_r\")\n self.assert_path(resp, \"response/player/pdata/rival\")\n self.assert_path(resp, \"response/player/pdata/fav_music_slot\")\n self.assert_path(resp, \"response/player/pdata/custom\")\n self.assert_path(resp, \"response/player/pdata/config\")\n self.assert_path(resp, \"response/player/pdata/stamp\")\n self.assert_path(resp, \"response/player/pdata/released\")\n self.assert_path(resp, \"response/player/pdata/record\")\n\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.format(resp.child_value('player/pdata/base/name')))\n\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n\n score = {\n 'id': child.child_value('mid'),\n 'chart': child.child_value('ntgrd'),\n 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'),\n 'score': child.child_value('scr'),\n 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms'),\n }\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[str, int]]) -> int:\n call = self.call_node()\n\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 
1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n 
log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/uid\")\n return resp.child_value('player/uid')\n\n def verify_lobby_read(self, location: str, extid: int) -> None:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'read')\n lobby.add_child(Node.s32('uid', extid))\n lobby.add_child(Node.u8('m_grade', 255))\n lobby.add_child(Node.string('lid', location))\n lobby.add_child(Node.s32('max', 128))\n lobby.add_child(Node.s32_array('friend', []))\n lobby.add_child(Node.u8('var', 5))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby/interval\")\n self.assert_path(resp, \"response/lobby/interval_p\")\n\n def verify_lobby_entry(self, location: str, extid: int) -> int:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n 
e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby/interval\")\n self.assert_path(resp, \"response/lobby/interval_p\")\n self.assert_path(resp, \"response/lobby/eid\")\n self.assert_path(resp, \"response/lobby/e/eid\")\n self.assert_path(resp, \"response/lobby/e/mid\")\n self.assert_path(resp, \"response/lobby/e/ng\")\n self.assert_path(resp, \"response/lobby/e/uid\")\n self.assert_path(resp, \"response/lobby/e/uattr\")\n self.assert_path(resp, \"response/lobby/e/pn\")\n self.assert_path(resp, \"response/lobby/e/mg\")\n self.assert_path(resp, \"response/lobby/e/mopt\")\n self.assert_path(resp, \"response/lobby/e/tid\")\n self.assert_path(resp, \"response/lobby/e/tn\")\n self.assert_path(resp, \"response/lobby/e/topt\")\n self.assert_path(resp, \"response/lobby/e/lid\")\n self.assert_path(resp, \"response/lobby/e/sn\")\n self.assert_path(resp, \"response/lobby/e/pref\")\n self.assert_path(resp, \"response/lobby/e/stg\")\n self.assert_path(resp, \"response/lobby/e/pside\")\n self.assert_path(resp, \"response/lobby/e/eatime\")\n self.assert_path(resp, \"response/lobby/e/ga\")\n self.assert_path(resp, \"response/lobby/e/gp\")\n self.assert_path(resp, \"response/lobby/e/la\")\n self.assert_path(resp, \"response/lobby/e/ver\")\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) -> None:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby\")\n\n def verify_pzlcmt_read(self, extid: int) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n 
info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/comment/time\")\n self.assert_path(resp, \"response/info/c/uid\")\n self.assert_path(resp, \"response/info/c/name\")\n self.assert_path(resp, \"response/info/c/icon\")\n self.assert_path(resp, \"response/info/c/bln\")\n self.assert_path(resp, \"response/info/c/tid\")\n self.assert_path(resp, \"response/info/c/t_name\")\n self.assert_path(resp, \"response/info/c/pref\")\n self.assert_path(resp, \"response/info/c/time\")\n self.assert_path(resp, \"response/info/c/comment\")\n self.assert_path(resp, \"response/info/c/is_tweet\")\n\n # Verify we posted our comment earlier\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception('Invalid name \\'{}\\' returned for comment!'.format(name))\n if comment != 'アメ〜〜!':\n raise Exception('Invalid comment \\'{}\\' returned for comment!'.format(comment))\n found = True\n\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n 
info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info\")\n\n def verify_jbrbcollabo_save(self, refid: str) -> None:\n call = self.call_node()\n\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/jbrbcollabo\")\n\n def verify(self, cardid: Optional[str]) -> None:\n # Verify boot sequence is okay\n self.verify_services_get(\n expected_services=[\n 'pcbtracker',\n 'pcbevent',\n 'local',\n 'message',\n 'facility',\n 'cardmng',\n 'package',\n 'posevent',\n 'pkglist',\n 'dlstatus',\n 'eacoin',\n 'lobby',\n 'ntp',\n 'keepalive'\n ]\n )\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n\n # Verify card registration and profile lookup\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print(\"Generated random card ID {} for use.\".format(card))\n\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception('Invalid refid \\'{}\\' returned when registering card'.format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled):\n raise Exception('Invalid refid \\'{}\\' returned when querying card'.format(ref_id))\n # Always get a player start, regardless of new profile or not\n self.verify_player_start(ref_id)\n 
self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(\n ref_id,\n location,\n [{\n 'id': 0,\n 'chart': 0,\n 'clear_type': -1,\n 'achievement_rate': 0,\n 'score': 0,\n 'combo': 0,\n 'miss_count': 0,\n }]\n )\n else:\n print(\"Skipping new card checks for existing card\")\n ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled)\n\n # Verify pin handling and return card handling\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled):\n raise Exception('Invalid refid \\'{}\\' returned when querying card'.format(ref_id))\n\n # Verify lobby functionality\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n\n # Verify puzzle comment read and write\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n\n # Verify Jubeat/ReflecBeat collabo save\n self.verify_jbrbcollabo_save(ref_id)\n\n if cardid is None:\n # Verify score saving and updating\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [\n # An okay score on a chart\n {\n 'id': 1,\n 'chart': 1,\n 'clear_type': 2,\n 'achievement_rate': 7543,\n 'score': 432,\n 'combo': 123,\n 'miss_count': 5,\n },\n # A good score on an easier chart of the same song\n {\n 'id': 1,\n 'chart': 0,\n 'clear_type': 4,\n 'achievement_rate': 9876,\n 'score': 543,\n 'combo': 543,\n 'miss_count': 0,\n },\n # A bad score on a hard chart\n {\n 'id': 3,\n 'chart': 2,\n 'clear_type': 2,\n 'achievement_rate': 1234,\n 'score': 123,\n 'combo': 42,\n 'miss_count': 54,\n },\n # A terrible score on an easy chart\n {\n 'id': 3,\n 'chart': 0,\n 'clear_type': 2,\n 'achievement_rate': 1024,\n 'score': 50,\n 'combo': 12,\n 'miss_count': 90,\n },\n ]\n if phase == 2:\n dummyscores = [\n # A better score on the same chart\n {\n 'id': 
1,\n 'chart': 1,\n 'clear_type': 3,\n 'achievement_rate': 8765,\n 'score': 469,\n 'combo': 468,\n 'miss_count': 1,\n },\n # A worse score on another same chart\n {\n 'id': 1,\n 'chart': 0,\n 'clear_type': 2,\n 'achievement_rate': 8765,\n 'score': 432,\n 'combo': 321,\n 'miss_count': 15,\n 'expected_score': 543,\n 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876,\n 'expected_combo': 543,\n 'expected_miss_count': 0,\n },\n ]\n self.verify_player_write(ref_id, location, dummyscores)\n\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received['chart'] == expected['chart']:\n actual = received\n break\n\n if actual is None:\n raise Exception(\"Didn't find song {} chart {} in response!\".format(expected['id'], expected['chart']))\n\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected['expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate']\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n\n if actual['score'] != expected_score:\n raise Exception('Expected a score of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got score \\'{}\\''.format(\n expected_score, expected['id'], expected['chart'], actual['score'],\n ))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception('Expected an achievement rate of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got 
achievement rate \\'{}\\''.format(\n expected_achievement_rate, expected['id'], expected['chart'], actual['achievement_rate'],\n ))\n if actual['clear_type'] != expected_clear_type:\n raise Exception('Expected a clear_type of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got clear_type \\'{}\\''.format(\n expected_clear_type, expected['id'], expected['chart'], actual['clear_type'],\n ))\n if actual['combo'] != expected_combo:\n raise Exception('Expected a combo of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got combo \\'{}\\''.format(\n expected_combo, expected['id'], expected['chart'], actual['combo'],\n ))\n if actual['miss_count'] != expected_miss_count:\n raise Exception('Expected a miss count of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got miss count \\'{}\\''.format(\n expected_miss_count, expected['id'], expected['chart'], actual['miss_count'],\n ))\n\n # Sleep so we don't end up putting in score history on the same second\n time.sleep(1)\n\n else:\n print(\"Skipping score checks for existing card\")\n\n # Verify ending game\n self.verify_player_end(ref_id)\n\n # Verify high score tables\n self.verify_info_ranking()\n\n # Verify paseli handling\n if paseli_enabled:\n print(\"PASELI enabled for this PCBID, executing PASELI checks\")\n else:\n print(\"PASELI disabled for this PCBID, skipping PASELI checks\")\n return\n\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print(\"Skipping PASELI consume check because card has 0 balance\")\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0, balance))\n self.verify_eacoin_checkout(sessid)\n",
"step-ids": [
13,
14,
16,
18,
20
]
}
|
[
13,
14,
16,
18,
20
] |
print("Enter string:")
s=input()
a = s.lower()
vowels = "aeiou"
consonants = "bcdfghjklmnpqrstvwxyz"
digits = "1234567890"
whitespace = " "
c = 0
v = 0
d = 0
ws= 0
for i in a:
if i in vowels:
v+=1
elif i in consonants:
c+=1
elif i in digits:
d+=1
elif i in whitespace:
ws+=1
print(v,c,d,ws)
|
normal
|
{
"blob_id": "088c77e090d444e7057a91cac606995fb523c8ef",
"index": 3079,
"step-1": "<mask token>\n",
"step-2": "print('Enter string:')\n<mask token>\nfor i in a:\n if i in vowels:\n v += 1\n elif i in consonants:\n c += 1\n elif i in digits:\n d += 1\n elif i in whitespace:\n ws += 1\nprint(v, c, d, ws)\n",
"step-3": "print('Enter string:')\ns = input()\na = s.lower()\nvowels = 'aeiou'\nconsonants = 'bcdfghjklmnpqrstvwxyz'\ndigits = '1234567890'\nwhitespace = ' '\nc = 0\nv = 0\nd = 0\nws = 0\nfor i in a:\n if i in vowels:\n v += 1\n elif i in consonants:\n c += 1\n elif i in digits:\n d += 1\n elif i in whitespace:\n ws += 1\nprint(v, c, d, ws)\n",
"step-4": "print(\"Enter string:\")\ns=input()\na = s.lower()\n\n\nvowels = \"aeiou\"\nconsonants = \"bcdfghjklmnpqrstvwxyz\"\ndigits = \"1234567890\"\nwhitespace = \" \"\n\nc = 0\nv = 0\nd = 0\nws= 0\n\nfor i in a:\n if i in vowels:\n v+=1\n elif i in consonants:\n c+=1\n elif i in digits:\n d+=1\n elif i in whitespace:\n ws+=1\n\nprint(v,c,d,ws)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Alignment_Corrector(Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Alignment_Corrector(Module):
def __init__(self):
self.din = din = Signal(32)
self.aligned = aligned = Signal()
self.dout = dout = Signal(32)
self.correction_done = Signal()
first_half = Signal(16)
first_half1 = Signal(16)
second_half = Signal(16)
self.submodules.fsm = FSM(reset_state='IDLE')
self.fsm.act('IDLE', If(aligned, NextState('INIT')))
self.fsm.act('INIT', NextState('DONE'), NextValue(first_half, din[
16:]), NextValue(self.correction_done, 1))
self.fsm.act('DONE', dout.eq(Cat(first_half, din[:16])), NextValue(
first_half, din[16:]), NextState('DONE'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from migen import *
from migen.fhdl import verilog
class Alignment_Corrector(Module):
def __init__(self):
self.din = din = Signal(32)
self.aligned = aligned = Signal()
self.dout = dout = Signal(32)
self.correction_done = Signal()
first_half = Signal(16)
first_half1 = Signal(16)
second_half = Signal(16)
self.submodules.fsm = FSM(reset_state='IDLE')
self.fsm.act('IDLE', If(aligned, NextState('INIT')))
self.fsm.act('INIT', NextState('DONE'), NextValue(first_half, din[
16:]), NextValue(self.correction_done, 1))
self.fsm.act('DONE', dout.eq(Cat(first_half, din[:16])), NextValue(
first_half, din[16:]), NextState('DONE'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from migen import *
from migen.fhdl import verilog
class Alignment_Corrector(Module):
def __init__(self):
self.din=din=Signal(32)
self.aligned=aligned=Signal()
self.dout=dout=Signal(32)
self.correction_done=Signal()
# # #
first_half=Signal(16)
first_half1=Signal(16)
second_half=Signal(16)
self.submodules.fsm=FSM(reset_state="IDLE")
self.fsm.act("IDLE",
If(aligned,
NextState("INIT"),
)
)
self.fsm.act("INIT",
NextState("DONE"),
NextValue(first_half,din[16:]),
NextValue(self.correction_done,1)
)
self.fsm.act("DONE",
dout.eq(Cat(first_half,din[:16])),
NextValue(first_half,din[16:]),
NextState("DONE")
)
#example = Alignment_Corrector()
#verilog.convert(example, {example.din, example.dout, example.aligned, example.correction_done}).write("alignment_corrector.v")
"""
def tb(dut):
yield
for i in range(10):
yield dut.din.eq(0x62cfa9d274)
yield dut.aligned.eq(1)
yield
yield dut.din.eq(0x9d30562d8b)
yield
dut=Alignment_Corrector()
run_simulation(dut,tb(dut),vcd_name="alignment_tb.vcd")
"""
|
flexible
|
{
"blob_id": "f3eed00a58491f36778b3a710d2f46be093d6eda",
"index": 6320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Alignment_Corrector(Module):\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Alignment_Corrector(Module):\n\n def __init__(self):\n self.din = din = Signal(32)\n self.aligned = aligned = Signal()\n self.dout = dout = Signal(32)\n self.correction_done = Signal()\n first_half = Signal(16)\n first_half1 = Signal(16)\n second_half = Signal(16)\n self.submodules.fsm = FSM(reset_state='IDLE')\n self.fsm.act('IDLE', If(aligned, NextState('INIT')))\n self.fsm.act('INIT', NextState('DONE'), NextValue(first_half, din[\n 16:]), NextValue(self.correction_done, 1))\n self.fsm.act('DONE', dout.eq(Cat(first_half, din[:16])), NextValue(\n first_half, din[16:]), NextState('DONE'))\n\n\n<mask token>\n",
"step-4": "from migen import *\nfrom migen.fhdl import verilog\n\n\nclass Alignment_Corrector(Module):\n\n def __init__(self):\n self.din = din = Signal(32)\n self.aligned = aligned = Signal()\n self.dout = dout = Signal(32)\n self.correction_done = Signal()\n first_half = Signal(16)\n first_half1 = Signal(16)\n second_half = Signal(16)\n self.submodules.fsm = FSM(reset_state='IDLE')\n self.fsm.act('IDLE', If(aligned, NextState('INIT')))\n self.fsm.act('INIT', NextState('DONE'), NextValue(first_half, din[\n 16:]), NextValue(self.correction_done, 1))\n self.fsm.act('DONE', dout.eq(Cat(first_half, din[:16])), NextValue(\n first_half, din[16:]), NextState('DONE'))\n\n\n<mask token>\n",
"step-5": "from migen import *\nfrom migen.fhdl import verilog\n\nclass Alignment_Corrector(Module):\n\tdef __init__(self):\n\t\tself.din=din=Signal(32)\n\t\tself.aligned=aligned=Signal()\n\t\tself.dout=dout=Signal(32)\n\t\tself.correction_done=Signal()\n\t\t#\t#\t#\n\t\tfirst_half=Signal(16)\n\t\tfirst_half1=Signal(16)\n\t\tsecond_half=Signal(16)\n\t\tself.submodules.fsm=FSM(reset_state=\"IDLE\")\n\t\tself.fsm.act(\"IDLE\",\n\t\t\tIf(aligned, \n\t\t\t\tNextState(\"INIT\"),\n\t\t\t)\n\t\t)\n\t\tself.fsm.act(\"INIT\",\n\t\t\tNextState(\"DONE\"),\n\t\t\tNextValue(first_half,din[16:]),\n\t\t\tNextValue(self.correction_done,1)\n\t\t)\n\t\tself.fsm.act(\"DONE\",\n\t\t\tdout.eq(Cat(first_half,din[:16])),\n\t\t\tNextValue(first_half,din[16:]),\n\t\t\tNextState(\"DONE\")\n\n\t\t)\n\t\n#example = Alignment_Corrector()\n#verilog.convert(example, {example.din, example.dout, example.aligned, example.correction_done}).write(\"alignment_corrector.v\")\n\n\n\n\t\n\"\"\"\ndef tb(dut):\n\tyield\t\n\tfor i in range(10):\n\t\tyield dut.din.eq(0x62cfa9d274) \n\t\tyield dut.aligned.eq(1)\n\t\tyield\n\t\tyield dut.din.eq(0x9d30562d8b)\n\t\tyield\n\ndut=Alignment_Corrector()\nrun_simulation(dut,tb(dut),vcd_name=\"alignment_tb.vcd\")\n\"\"\"\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__: List[str]
record: Any
recarray: Any
format_parser: Any
fromarrays: Any
fromrecords: Any
fromstring: Any
fromfile: Any
array: Any
<|reserved_special_token_1|>
from typing import Any, List
__all__: List[str]
record: Any
recarray: Any
format_parser: Any
fromarrays: Any
fromrecords: Any
fromstring: Any
fromfile: Any
array: Any
|
flexible
|
{
"blob_id": "2e1ad83bcd16f59338032f8ad5ca8ebd74e92200",
"index": 6664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__: List[str]\nrecord: Any\nrecarray: Any\nformat_parser: Any\nfromarrays: Any\nfromrecords: Any\nfromstring: Any\nfromfile: Any\narray: Any\n",
"step-3": "from typing import Any, List\n__all__: List[str]\nrecord: Any\nrecarray: Any\nformat_parser: Any\nfromarrays: Any\nfromrecords: Any\nfromstring: Any\nfromfile: Any\narray: Any\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from sqlalchemy import select, update
from sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger
from sqlalchemy import create_engine, MetaData
import API_and_Database_function as func
import pandas as pd
import re
connection, Twitter_Sentiment_Analysis = func.Database_Acces("mysql://root@localhost/sentiment?charset=utf8mb4", 'utf8' , 'Twitter_Sentiment_Analysis4' )
stmt = "SET NAMES 'UTF8';"
connection.execute(stmt)
func.update_annotations_db(Twitter_Sentiment_Analysis, connection, "Export_csv5.csv")
|
normal
|
{
"blob_id": "a558b42106b036719fe38ee6efd1c5b933290f52",
"index": 47,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-3": "<mask token>\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-4": "from sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\n 'mysql://root@localhost/sentiment?charset=utf8mb4', 'utf8',\n 'Twitter_Sentiment_Analysis4')\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection,\n 'Export_csv5.csv')\n",
"step-5": "#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom sqlalchemy import select, update\nfrom sqlalchemy import Table, Column, String, Integer, Float, Boolean, Date, BigInteger\nfrom sqlalchemy import create_engine, MetaData\nimport API_and_Database_function as func\nimport pandas as pd\nimport re\n\n\nconnection, Twitter_Sentiment_Analysis = func.Database_Acces(\"mysql://root@localhost/sentiment?charset=utf8mb4\", 'utf8' , 'Twitter_Sentiment_Analysis4' )\nstmt = \"SET NAMES 'UTF8';\"\nconnection.execute(stmt)\nfunc.update_annotations_db(Twitter_Sentiment_Analysis, connection, \"Export_csv5.csv\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample(x, arg=None):
if arg is None:
arg = []
arg.append(x)
return arg
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def sample(x, arg=[]):
arg.append(x)
return arg
<|reserved_special_token_0|>
def sample(x, arg=None):
if arg is None:
arg = []
arg.append(x)
return arg
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def sample(x, arg=[]):
arg.append(x)
return arg
print(sample(1))
print(sample(2))
print(sample(3))
def sample(x, arg=None):
if arg is None:
arg = []
arg.append(x)
return arg
print(sample(1))
print(sample(2))
print(sample(3))
<|reserved_special_token_1|>
#デフォルト引数の破壊
#以下、破壊的な操作
def sample(x, arg=[]):
arg.append(x)
return arg
print(sample(1))
print(sample(2))
print(sample(3))
#対策・・・デフォルト引数にはイミュータブルなものを使用する
def sample(x, arg=None):
if arg is None:
arg = []
arg.append(x)
return arg
print(sample(1))
print(sample(2))
print(sample(3))
|
flexible
|
{
"blob_id": "1b645ab0a48b226e26009f76ea49fd3f10f5cc7b",
"index": 3880,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample(x, arg=None):\n if arg is None:\n arg = []\n arg.append(x)\n return arg\n\n\n<mask token>\n",
"step-3": "def sample(x, arg=[]):\n arg.append(x)\n return arg\n\n\n<mask token>\n\n\ndef sample(x, arg=None):\n if arg is None:\n arg = []\n arg.append(x)\n return arg\n\n\n<mask token>\n",
"step-4": "def sample(x, arg=[]):\n arg.append(x)\n return arg\n\n\nprint(sample(1))\nprint(sample(2))\nprint(sample(3))\n\n\ndef sample(x, arg=None):\n if arg is None:\n arg = []\n arg.append(x)\n return arg\n\n\nprint(sample(1))\nprint(sample(2))\nprint(sample(3))\n",
"step-5": "#デフォルト引数の破壊\r\n#以下、破壊的な操作\r\ndef sample(x, arg=[]):\r\n arg.append(x)\r\n return arg\r\n\r\nprint(sample(1))\r\nprint(sample(2))\r\nprint(sample(3))\r\n\r\n#対策・・・デフォルト引数にはイミュータブルなものを使用する\r\ndef sample(x, arg=None):\r\n if arg is None:\r\n arg = []\r\n \r\n arg.append(x)\r\n return arg\r\n\r\nprint(sample(1))\r\nprint(sample(2))\r\nprint(sample(3))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def game_manager(info_list):
dictionary = {}
for piece_info in info_list:
piece_info = piece_info.split('||')
piece_info[2] = int(piece_info[2])
if piece_info[2] not in dictionary:
dictionary[piece_info[2]] = {(piece_info[1],piece_info[0])}
dictionary[piece_info[2]].add((piece_info[1],piece_info[0]))
print(dictionary)
info = ['Final Fantasy VII||SCEA||1997','Mirror’s Edge||Electronic Arts||2008','GTA 4||Rockstar Games||2008','Grandia||SCEA||1997', \
'Half Life 2||Valve||2004']
game_manager(info)
|
normal
|
{
"blob_id": "a382edb861a43ac3065a781ea996a8d1dd819954",
"index": 6649,
"step-1": "<mask token>\n",
"step-2": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\n<mask token>\n",
"step-3": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\n<mask token>\ngame_manager(info)\n",
"step-4": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1], piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1], piece_info[0]))\n print(dictionary)\n\n\ninfo = ['Final Fantasy VII||SCEA||1997',\n 'Mirror’s Edge||Electronic Arts||2008', 'GTA 4||Rockstar Games||2008',\n 'Grandia||SCEA||1997', 'Half Life 2||Valve||2004']\ngame_manager(info)\n",
"step-5": "def game_manager(info_list):\n dictionary = {}\n for piece_info in info_list:\n piece_info = piece_info.split('||')\n piece_info[2] = int(piece_info[2])\n if piece_info[2] not in dictionary:\n dictionary[piece_info[2]] = {(piece_info[1],piece_info[0])}\n dictionary[piece_info[2]].add((piece_info[1],piece_info[0]))\n print(dictionary)\n\n\ninfo = ['Final Fantasy VII||SCEA||1997','Mirror’s Edge||Electronic Arts||2008','GTA 4||Rockstar Games||2008','Grandia||SCEA||1997', \\\n'Half Life 2||Valve||2004']\n\ngame_manager(info)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with tf.name_scope('input_data'):
(iterate_data, sub_images, sub_depths, sub_images_placeholder,
sub_depths_placeholder) = rd.read_debug_data()
sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:
303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,
:, :], -1, 0), dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55,
0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],
dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],
dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:
55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
tf.summary.image(name='images_coarse', tensor=sub_images_coarse,
max_outputs=1)
tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)
<|reserved_special_token_0|>
with tf.Session() as sess:
writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(fine_depthmap_predictions)
fine_cost = nf.get_cost_function(depthmaps_predicted=
fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)
optimizer_fine = nf.get_fine_optimizer(fine_cost)
sess.run(tf.global_variables_initializer())
sess.run(optimizer_fine)
merged_summary = sess.run(tf.summary.merge_all())
writer.add_summary(merged_summary)
writer.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with tf.name_scope('input_data'):
(iterate_data, sub_images, sub_depths, sub_images_placeholder,
sub_depths_placeholder) = rd.read_debug_data()
sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:
303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,
:, :], -1, 0), dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55,
0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],
dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],
dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:
55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
tf.summary.image(name='images_coarse', tensor=sub_images_coarse,
max_outputs=1)
tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)
coarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=
sub_images_coarse)
fine_depthmap_predictions = nf.get_fine_network(input_placeholder=
sub_images_fine, coarse_prediction=coarse_depthmap_predictions)
with tf.Session() as sess:
writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(fine_depthmap_predictions)
fine_cost = nf.get_cost_function(depthmaps_predicted=
fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)
optimizer_fine = nf.get_fine_optimizer(fine_cost)
sess.run(tf.global_variables_initializer())
sess.run(optimizer_fine)
merged_summary = sess.run(tf.summary.merge_all())
writer.add_summary(merged_summary)
writer.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import network_functions_2_elin as nf
import tensorflow as tf
import numpy as np
import read_data as rd
with tf.name_scope('input_data'):
(iterate_data, sub_images, sub_depths, sub_images_placeholder,
sub_depths_placeholder) = rd.read_debug_data()
sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:
303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,
:, :], -1, 0), dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55,
0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],
dtype=tf.float32, name='images_coarse')
sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],
dtype=tf.float32, name='images_fine')
depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:
55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')
tf.summary.image(name='images_coarse', tensor=sub_images_coarse,
max_outputs=1)
tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)
coarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=
sub_images_coarse)
fine_depthmap_predictions = nf.get_fine_network(input_placeholder=
sub_images_fine, coarse_prediction=coarse_depthmap_predictions)
with tf.Session() as sess:
writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(fine_depthmap_predictions)
fine_cost = nf.get_cost_function(depthmaps_predicted=
fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)
optimizer_fine = nf.get_fine_optimizer(fine_cost)
sess.run(tf.global_variables_initializer())
sess.run(optimizer_fine)
merged_summary = sess.run(tf.summary.merge_all())
writer.add_summary(merged_summary)
writer.close()
<|reserved_special_token_1|>
'''
"MAIN" module
All operations are added to the defaultgraph.
Network functions are found in module network_functions_2
Display graph in tensorboard by opening a new terminal and write "tensorboard --logdir=tensorbaord/debug/01/" where
the last number depends on which directory the current graph is saved in (see line 35 in this module where the
FileWriter is created). After this, open the local webpage displayed in the terminal (looks something like http://OSCAR-LENOVO-LAPTOP:6006)
but with your own username.
'''
import network_functions_2_elin as nf
import tensorflow as tf
import numpy as np
import read_data as rd
with tf.name_scope("input_data"):
# import images
(iterate_data, sub_images, sub_depths, sub_images_placeholder, sub_depths_placeholder) = rd.read_debug_data()
sub_images_coarse = tf.constant(value = np.moveaxis(sub_images[0:223, 0:303, :, :], -1, 0), dtype = tf.float32, name = "images_coarse")
sub_images_fine = tf.constant(value = np.moveaxis(sub_images[0:227, 0:303, :, :], -1, 0), dtype = tf.float32, name = "images_fine")
depthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[0:55, 0:74, :], -1, 0), dtype = tf.float32, name = "depthmaps_groundtruth")
sub_images_coarse = tf.constant(value = sub_images[:,0:223, 0:303, :], dtype = tf.float32, name = "images_coarse")
sub_images_fine = tf.constant(value = sub_images[:, 0:227, 0:303, :], dtype = tf.float32, name = "images_fine")
depthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[:,0:55, 0:74, :], -1, 0), dtype = tf.float32, name = "depthmaps_groundtruth")
# print sample images to tensorboard
tf.summary.image(name = "images_coarse", tensor = sub_images_coarse, max_outputs = 1)
tf.summary.image(name = "images_fine", tensor = sub_images_fine, max_outputs = 1)
# define coarse and fine networks
coarse_depthmap_predictions = nf.get_coarse_network(input_placeholder = sub_images_coarse)
fine_depthmap_predictions = nf.get_fine_network(input_placeholder = sub_images_fine, coarse_prediction = coarse_depthmap_predictions)
# Session: tensorflow calculates all values using the input
with tf.Session() as sess:
# tensorboard writer CHANGE THE DIR NUMBER EVERY RUN (27 -> 28 -> 29 etc.)
# tensorboard/* in .gitignore
writer = tf.summary.FileWriter("./tensorboard/debug/07", sess.graph)
sess.run(tf.global_variables_initializer())
sess.run(fine_depthmap_predictions)
# compute cost function
fine_cost = nf.get_cost_function(depthmaps_predicted = fine_depthmap_predictions,
depthmaps_groundtruth = depthmaps_groundtruth)
# calculate and run optimizer
optimizer_fine = nf.get_fine_optimizer(fine_cost)
sess.run(tf.global_variables_initializer())
sess.run(optimizer_fine)
# this code makes sure that all info gets written to tensorboard
merged_summary = sess.run(tf.summary.merge_all())
writer.add_summary(merged_summary)
writer.close()
|
flexible
|
{
"blob_id": "8a2cf1d550a593beae579104413b424e007d511f",
"index": 9048,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\n<mask token>\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-3": "<mask token>\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=\n sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder=\n sub_images_fine, coarse_prediction=coarse_depthmap_predictions)\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-4": "<mask token>\nimport network_functions_2_elin as nf\nimport tensorflow as tf\nimport numpy as np\nimport read_data as rd\nwith tf.name_scope('input_data'):\n (iterate_data, sub_images, sub_depths, sub_images_placeholder,\n sub_depths_placeholder) = rd.read_debug_data()\n sub_images_coarse = tf.constant(value=np.moveaxis(sub_images[0:223, 0:\n 303, :, :], -1, 0), dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=np.moveaxis(sub_images[0:227, 0:303,\n :, :], -1, 0), dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[0:55, \n 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n sub_images_coarse = tf.constant(value=sub_images[:, 0:223, 0:303, :],\n dtype=tf.float32, name='images_coarse')\n sub_images_fine = tf.constant(value=sub_images[:, 0:227, 0:303, :],\n dtype=tf.float32, name='images_fine')\n depthmaps_groundtruth = tf.constant(value=np.moveaxis(sub_depths[:, 0:\n 55, 0:74, :], -1, 0), dtype=tf.float32, name='depthmaps_groundtruth')\n tf.summary.image(name='images_coarse', tensor=sub_images_coarse,\n max_outputs=1)\n tf.summary.image(name='images_fine', tensor=sub_images_fine, max_outputs=1)\ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder=\n sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder=\n sub_images_fine, coarse_prediction=coarse_depthmap_predictions)\nwith tf.Session() as sess:\n writer = tf.summary.FileWriter('./tensorboard/debug/07', sess.graph)\n sess.run(tf.global_variables_initializer())\n sess.run(fine_depthmap_predictions)\n fine_cost = nf.get_cost_function(depthmaps_predicted=\n fine_depthmap_predictions, depthmaps_groundtruth=depthmaps_groundtruth)\n optimizer_fine = nf.get_fine_optimizer(fine_cost)\n sess.run(tf.global_variables_initializer())\n sess.run(optimizer_fine)\n merged_summary = sess.run(tf.summary.merge_all())\n writer.add_summary(merged_summary)\n writer.close()\n",
"step-5": "'''\n\"MAIN\" module \nAll operations are added to the defaultgraph.\nNetwork functions are found in module network_functions_2 \nDisplay graph in tensorboard by opening a new terminal and write \"tensorboard --logdir=tensorbaord/debug/01/\" where \nthe last number depends on which directory the current graph is saved in (see line 35 in this module where the \nFileWriter is created). After this, open the local webpage displayed in the terminal (looks something like http://OSCAR-LENOVO-LAPTOP:6006) \nbut with your own username. \n'''\n\nimport network_functions_2_elin as nf\nimport tensorflow as tf\nimport numpy as np\nimport read_data as rd\n\n\nwith tf.name_scope(\"input_data\"):\n\t# import images \n\t(iterate_data, sub_images, sub_depths, sub_images_placeholder, sub_depths_placeholder) = rd.read_debug_data()\t\n\tsub_images_coarse = tf.constant(value = np.moveaxis(sub_images[0:223, 0:303, :, :], -1, 0), dtype = tf.float32, name = \"images_coarse\") \n\tsub_images_fine = tf.constant(value = np.moveaxis(sub_images[0:227, 0:303, :, :], -1, 0), dtype = tf.float32, name = \"images_fine\") \n\tdepthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[0:55, 0:74, :], -1, 0), dtype = tf.float32, name = \"depthmaps_groundtruth\")\n\n\tsub_images_coarse = tf.constant(value = sub_images[:,0:223, 0:303, :], dtype = tf.float32, name = \"images_coarse\") \n\tsub_images_fine = tf.constant(value = sub_images[:, 0:227, 0:303, :], dtype = tf.float32, name = \"images_fine\") \n\tdepthmaps_groundtruth = tf.constant(value = np.moveaxis(sub_depths[:,0:55, 0:74, :], -1, 0), dtype = tf.float32, name = \"depthmaps_groundtruth\")\n\t\n\t# print sample images to tensorboard \n\ttf.summary.image(name = \"images_coarse\", tensor = sub_images_coarse, max_outputs = 1)\n\ttf.summary.image(name = \"images_fine\", tensor = sub_images_fine, max_outputs = 1)\n\n\n# define coarse and fine networks \ncoarse_depthmap_predictions = nf.get_coarse_network(input_placeholder = 
sub_images_coarse)\nfine_depthmap_predictions = nf.get_fine_network(input_placeholder = sub_images_fine, coarse_prediction = coarse_depthmap_predictions)\n\n\n# Session: tensorflow calculates all values using the input \nwith tf.Session() as sess:\n\n\t# tensorboard writer CHANGE THE DIR NUMBER EVERY RUN (27 -> 28 -> 29 etc.)\n\t# tensorboard/* in .gitignore \n\twriter = tf.summary.FileWriter(\"./tensorboard/debug/07\", sess.graph) \t\n\n\tsess.run(tf.global_variables_initializer())\t\n\t\t\t\t\t\t\t \n\tsess.run(fine_depthmap_predictions)\t\t\t\t\t\t\t\t\t\t\n\n\t# compute cost function \n\tfine_cost = nf.get_cost_function(depthmaps_predicted = fine_depthmap_predictions, \n\t\t\t\t\t\t\t\t\tdepthmaps_groundtruth = depthmaps_groundtruth)\n\n\t# calculate and run optimizer \n\toptimizer_fine = nf.get_fine_optimizer(fine_cost)\t\n\tsess.run(tf.global_variables_initializer())\t\t\t\n\tsess.run(optimizer_fine)\n\n\t# this code makes sure that all info gets written to tensorboard \n\tmerged_summary = sess.run(tf.summary.merge_all())\n\twriter.add_summary(merged_summary)\n\twriter.close()\n\n\n\t\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(len(peptidasesList))
<|reserved_special_token_0|>
for i in range(len(peptidasesList)):
if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[
peptidasesList.loc[i, 'PDB']]:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
else:
bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,
'chain/kegg compound']].append(peptidasesList.loc[i,
'resid/chebi id'])
for protein in bindingSiteDic:
for chain in bindingSiteDic[protein]:
bindingSiteDic[protein][chain] = [int(x) for x in list(set(
bindingSiteDic[protein][chain]))]
<|reserved_special_token_0|>
uniqueList.reset_index(drop=True).iloc[20:,]
<|reserved_special_token_0|>
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
protein_start_time = datetime.now()
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
chain = chainOrder
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
columns = np.array(list(oneChain.iloc[:, 0]))
row_index = oneChain.iloc[:, 0]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nsmallest(n_bigger,
row_list)))
target_col = columns[result]
target_list.append(target_col)
neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[
row], str(target_col)]
protein_end_time = datetime.now()
print(pdbID, ' Duration: {}'.format(protein_end_time - protein_start_time))
<|reserved_special_token_0|>
print('The total Duration: {}'.format(end_time - start_time))
print(time.time())
<|reserved_special_token_0|>
for pdbid in uniqueList.iloc[:, 0]:
exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')
if not exist:
PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')
<|reserved_special_token_0|>
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
<|reserved_special_token_0|>
print(len(oneChain))
print(time.time())
<|reserved_special_token_0|>
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -
oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x *
x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))
target_col = columns[result]
target_list.append(target_col)
print(time.time())
<|reserved_special_token_0|>
sortedD[:, len(oneChain) - 10:]
distanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -
10:]
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
print(time.time())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dir_path = os.getcwd()
peptidasesList = pd.read_csv('./MCSA_EC3.4_peptidases.csv')
peptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == 'residue']
peptidasesList = peptidasesList.reset_index(drop=True)
print(len(peptidasesList))
bindingSiteDic = {}
for i in range(len(peptidasesList)):
if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[
peptidasesList.loc[i, 'PDB']]:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
else:
bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,
'chain/kegg compound']].append(peptidasesList.loc[i,
'resid/chebi id'])
for protein in bindingSiteDic:
for chain in bindingSiteDic[protein]:
bindingSiteDic[protein][chain] = [int(x) for x in list(set(
bindingSiteDic[protein][chain]))]
uniqueList = peptidasesList[['PDB', 'chain/kegg compound']].drop_duplicates()
uniqueList.reset_index(drop=True).iloc[20:,]
backbone = ['N', 'CA', 'C', 'O']
aminoAcidCodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLY', 'GLU',
'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'PYL', 'SER', 'SEC',
'THR', 'TRP', 'TYR', 'TRP', 'VAL']
neighhor_df = pd.DataFrame(columns=['proteinid', 'chain', 'aaid', 'neighborid']
)
n_bigger = 5
target_list = []
start_time = datetime.now()
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
protein_start_time = datetime.now()
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
chain = chainOrder
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
columns = np.array(list(oneChain.iloc[:, 0]))
row_index = oneChain.iloc[:, 0]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nsmallest(n_bigger,
row_list)))
target_col = columns[result]
target_list.append(target_col)
neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[
row], str(target_col)]
protein_end_time = datetime.now()
print(pdbID, ' Duration: {}'.format(protein_end_time - protein_start_time))
end_time = datetime.now()
print('The total Duration: {}'.format(end_time - start_time))
print(time.time())
pdbID = uniqueList.iloc[35, 0]
chainOrder = uniqueList.iloc[35, 1]
PDB = PDBList()
for pdbid in uniqueList.iloc[:, 0]:
exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')
if not exist:
PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction',
'pdbid', 'chain'])
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list
(oneChain.iloc[:, 0]))
print(len(oneChain))
print(time.time())
numResidue = len(oneChain)
columns = np.array(list(oneChain.iloc[:, 0]))
n_bigger = 3
target_list = []
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -
oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x *
x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))
target_col = columns[result]
target_list.append(target_col)
print(time.time())
sortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)
sortedD = np.array(sortedDistance.tolist())
sortedD[:, len(oneChain) - 10:]
distanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -
10:]
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
print(time.time())
<|reserved_special_token_1|>
from Bio.PDB import *
import urllib.request
import numpy as np
import pandas as pd
from math import sqrt
import time
import os
import heapq
from datetime import datetime
dir_path = os.getcwd()
peptidasesList = pd.read_csv('./MCSA_EC3.4_peptidases.csv')
peptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == 'residue']
peptidasesList = peptidasesList.reset_index(drop=True)
print(len(peptidasesList))
bindingSiteDic = {}
for i in range(len(peptidasesList)):
if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[
peptidasesList.loc[i, 'PDB']]:
bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[
i, 'chain/kegg compound']: [peptidasesList.loc[i,
'resid/chebi id']]}
else:
bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,
'chain/kegg compound']].append(peptidasesList.loc[i,
'resid/chebi id'])
for protein in bindingSiteDic:
for chain in bindingSiteDic[protein]:
bindingSiteDic[protein][chain] = [int(x) for x in list(set(
bindingSiteDic[protein][chain]))]
uniqueList = peptidasesList[['PDB', 'chain/kegg compound']].drop_duplicates()
uniqueList.reset_index(drop=True).iloc[20:,]
backbone = ['N', 'CA', 'C', 'O']
aminoAcidCodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLY', 'GLU',
'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'PYL', 'SER', 'SEC',
'THR', 'TRP', 'TYR', 'TRP', 'VAL']
neighhor_df = pd.DataFrame(columns=['proteinid', 'chain', 'aaid', 'neighborid']
)
n_bigger = 5
target_list = []
start_time = datetime.now()
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
protein_start_time = datetime.now()
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
chain = chainOrder
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
columns = np.array(list(oneChain.iloc[:, 0]))
row_index = oneChain.iloc[:, 0]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nsmallest(n_bigger,
row_list)))
target_col = columns[result]
target_list.append(target_col)
neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[
row], str(target_col)]
protein_end_time = datetime.now()
print(pdbID, ' Duration: {}'.format(protein_end_time - protein_start_time))
end_time = datetime.now()
print('The total Duration: {}'.format(end_time - start_time))
print(time.time())
pdbID = uniqueList.iloc[35, 0]
chainOrder = uniqueList.iloc[35, 1]
PDB = PDBList()
for pdbid in uniqueList.iloc[:, 0]:
exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')
if not exist:
PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction',
'pdbid', 'chain'])
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()[1],
residue.get_resname(), center, cToRGroup, pdbID,
chainOrder]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list
(oneChain.iloc[:, 0]))
print(len(oneChain))
print(time.time())
numResidue = len(oneChain)
columns = np.array(list(oneChain.iloc[:, 0]))
n_bigger = 3
target_list = []
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -
oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x *
x, coordinatesSubstraction))))
row_list = list(distanceMatrix.iloc[row, :])
result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))
target_col = columns[result]
target_list.append(target_col)
print(time.time())
sortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)
sortedD = np.array(sortedDistance.tolist())
sortedD[:, len(oneChain) - 10:]
distanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -
10:]
for eachRow in range(0, len(uniqueList)):
pdbID = uniqueList.iloc[eachRow, 0]
chainOrder = uniqueList.iloc[eachRow, 1]
PDB = PDBList()
PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')
p = PDBParser()
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')
oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])
if structure.header['resolution'] <= 3.0:
if chainOrder in [x.id for x in list(structure[0].get_chains())]:
for residue in structure[0][chainOrder]:
if residue.get_resname() in aminoAcidCodes:
if len(list(residue.get_atoms())) > 3:
if residue.get_resname() != 'GLY':
point = vectors.Vector([0, 0, 0])
for atom in residue:
if atom.get_name() not in backbone:
point = point + atom.get_vector()
center = point.__div__(len(residue) - 4)
cToRGroup = residue['CA'].get_vector() - center
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
else:
center = residue['CA'].get_vector()
cToRGroup = center - (residue['C'].get_vector() +
residue['N'].get_vector() + residue['O'].
get_vector()).__div__(3)
oneChain.loc[len(oneChain)] = [residue.get_id()
[1], residue.get_resname(), center, cToRGroup]
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),
index=list(oneChain.iloc[:, 0]))
print(time.time())
numResidue = len(oneChain)
for row in range(0, numResidue):
if row % 50 == 0:
print(str(row) + 'th row')
for column in range(0, numResidue):
coordinatesSubstraction = list(oneChain.loc[row,
'Center'] - oneChain.loc[column, 'Center'])
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda
x: x * x, coordinatesSubstraction))))
print(time.time())
<|reserved_special_token_1|>
from Bio.PDB import *
import urllib.request
import numpy as np
import pandas as pd
from math import sqrt
import time
import os
import heapq
from datetime import datetime

# --- Load the M-CSA EC 3.4 peptidase table and index catalytic residues ---
dir_path = os.getcwd()
peptidasesList = pd.read_csv("./MCSA_EC3.4_peptidases.csv")
# Keep only rows that describe individual catalytic residues (column 4 == "residue").
peptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == "residue"]
peptidasesList = peptidasesList.reset_index(drop=True)
print(len(peptidasesList))
# bindingSiteDic: PDB id -> {chain id -> [residue sequence numbers]}.
bindingSiteDic = {}
for i in range(len(peptidasesList)):
    # print(bindingSiteDic)
    if peptidasesList.loc[i, "PDB"] not in bindingSiteDic:
        bindingSiteDic[peptidasesList.loc[i, "PDB"]] = {
            peptidasesList.loc[i, "chain/kegg compound"]: [peptidasesList.loc[i, "resid/chebi id"]]}
    elif peptidasesList.loc[i, "chain/kegg compound"] not in bindingSiteDic[peptidasesList.loc[i, "PDB"]]:
        # BUG FIX: add the new chain key to the existing per-PDB dict instead of
        # replacing the whole dict — the old code dropped every previously seen
        # chain of the same PDB entry.
        bindingSiteDic[peptidasesList.loc[i, "PDB"]][peptidasesList.loc[i, "chain/kegg compound"]] = [
            peptidasesList.loc[i, "resid/chebi id"]]
    else:
        bindingSiteDic[peptidasesList.loc[i, "PDB"]][peptidasesList.loc[i, "chain/kegg compound"]].append(
            peptidasesList.loc[i, "resid/chebi id"])
# Deduplicate residue ids per chain and normalise them to ints.
for protein in bindingSiteDic:
    for chain in bindingSiteDic[protein]:
        bindingSiteDic[protein][chain] = [int(x) for x in list(set(bindingSiteDic[protein][chain]))]
# One row per unique (PDB id, chain) pair; iterated by the loops below.
uniqueList = peptidasesList[["PDB", "chain/kegg compound"]].drop_duplicates()
# NOTE(review): the expression below discards its result (no assignment) — it
# looks like a leftover from interactive exploration; kept as a no-op.
uniqueList.reset_index(drop=True).iloc[20:, ]
# Backbone atom names; every other atom of a residue belongs to the side chain.
backbone = ["N", "CA", "C", "O"]
# Three-letter codes of the amino acids to process ("TRP" appears twice in the
# original list; harmless for membership tests).
aminoAcidCodes = ["ALA", "ARG", "ASN", "ASP", "CYS", "GLN", "GLY", "GLU", "HIS", "ILE", "LEU", "LYS",
                  "MET", "PHE", "PRO", "PYL", "SER", "SEC", "THR", "TRP", "TYR", "TRP", "VAL"]
# --- Main pass: for every (PDB, chain) pair, build per-residue pseudo-atoms,
# --- compute the all-vs-all distance matrix and record each residue's
# --- n_bigger nearest neighbours (by centre distance, self included).
neighhor_df = pd.DataFrame(columns=["proteinid", "chain", "aaid", "neighborid"])
n_bigger = 5  # number of nearest residues kept per residue (includes itself, distance 0)
target_list = []
start_time = datetime.now()
for eachRow in range(0, len(uniqueList)):
    pdbID = uniqueList.iloc[eachRow, 0]
    chainOrder = uniqueList.iloc[eachRow, 1]
    PDB = PDBList()
    # Download the structure into ../pdb (PDB text format -> pdb<ID>.ent).
    PDB.retrieve_pdb_file(pdb_code=pdbID, pdir="../pdb", file_format="pdb")
    p = PDBParser()
    structure = p.get_structure("X", "../pdb/pdb" + pdbID + ".ent")
    # One row per residue: sequence number, name, side-chain "Center" vector,
    # and a "Direction" vector from centre towards the backbone CA.
    oneChain = pd.DataFrame(columns=["Seq", "Residue", "Center", "Direction"])
    protein_start_time = datetime.now()
    if structure.header["resolution"] <= 3.0:  # skip low-resolution structures
        if chainOrder in [x.id for x in list(structure[0].get_chains())]:
            chain = chainOrder
            for residue in structure[0][chainOrder]:
                if residue.get_resname() in aminoAcidCodes:
                    if len(list(residue.get_atoms())) > 3:  # needs more than just a partial backbone
                        if residue.get_resname() != "GLY":
                            # Centre = centroid of all side-chain (non-backbone) atoms.
                            point = vectors.Vector([0, 0, 0])
                            for atom in residue:
                                if (atom.get_name() not in backbone):
                                    point = point + atom.get_vector()
                            # NOTE(review): divisor assumes exactly 4 backbone atoms are
                            # present (no OXT, no missing atoms) — confirm for edge residues.
                            center = point.__div__(len(residue) - 4)
                            cToRGroup = residue["CA"].get_vector() - center
                            oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,
                                                           cToRGroup]
                        else:
                            # Glycine has no side chain: use CA as the centre and point
                            # away from the backbone (C, N, O) centroid.
                            # NOTE(review): this vector's sense looks opposite to the
                            # non-GLY branch (toward vs. away from CA) — confirm intent.
                            center = residue["CA"].get_vector()
                            cToRGroup = center - (residue["C"].get_vector() + residue["N"].get_vector() + residue[
                                "O"].get_vector()).__div__(3)
                            oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,
                                                           cToRGroup]
            columns = np.array(list(oneChain.iloc[:, 0]))
            row_index = oneChain.iloc[:, 0]
            # Square matrix of centre-to-centre Euclidean distances, labelled by
            # residue sequence number on both axes.
            distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 0]))
            print(time.time())
            numResidue = len(oneChain)
            for row in range(0, numResidue):
                if row % 50 == 0:
                    print(str(row) + "th row")  # coarse progress indicator
                for column in range(0, numResidue):
                    coordinatesSubstraction = list(oneChain.loc[row, "Center"] - oneChain.loc[column, "Center"])
                    distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))
                # distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))
                # Pick the n_bigger smallest distances in this row (nearest residues,
                # including the residue itself at distance 0) and record their ids.
                row_list = list(distanceMatrix.iloc[row, :])
                result = list(map(row_list.index, heapq.nsmallest(n_bigger, row_list)))
                target_col = columns[result]
                target_list.append(target_col)
                neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[row], str(target_col)]
    protein_end_time = datetime.now()
    print(pdbID, " Duration: {}".format(protein_end_time - protein_start_time))
end_time = datetime.now()
print("The total Duration: {}".format(end_time - start_time))
print(time.time())
# --- Single-protein inspection: build the residue table and full distance
# --- matrix for one example entry (row 35 of uniqueList), then look at the
# --- largest distances / their indices.
pdbID = uniqueList.iloc[35, 0]
chainOrder = uniqueList.iloc[35, 1]
PDB = PDBList()
# Pre-download every structure that is not already cached under ../pdb.
for pdbid in uniqueList.iloc[:, 0]:
    # BUG FIX: check the cache file for the entry being downloaded (pdbid), not
    # the fixed example entry (pdbID) — the old test made the cache check useless
    # for every id except the example one.
    exist = os.path.isfile('../pdb/pdb' + pdbid + '.ent')
    if not exist:
        PDB.retrieve_pdb_file(pdb_code=pdbid, pdir="../pdb", file_format="pdb")
p = PDBParser()
structure = p.get_structure("X", "../pdb/pdb" + pdbID + ".ent")
# Same per-residue table as the main pass, plus the PDB id / chain columns.
oneChain = pd.DataFrame(columns=["Seq", "Residue", "Center", "Direction", "pdbid", "chain"])
if structure.header["resolution"] <= 3.0:
    if chainOrder in [x.id for x in list(structure[0].get_chains())]:  # Chain information not in pdb file
        for residue in structure[0][chainOrder]:
            if residue.get_resname() in aminoAcidCodes:  # Only treat common amino acid
                if len(list(residue.get_atoms())) > 3:
                    if residue.get_resname() != "GLY":  # Glysine as a special case
                        # Centre = centroid of side-chain atoms; direction points to CA.
                        point = vectors.Vector([0, 0, 0])
                        for atom in residue:
                            if (atom.get_name() not in backbone):
                                point = point + atom.get_vector()
                        center = point.__div__(len(residue) - 4)
                        cToRGroup = residue["CA"].get_vector() - center
                        oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center, cToRGroup,
                                                       pdbID, chainOrder]
                    else:
                        # Glycine: CA as centre, direction away from backbone centroid.
                        center = residue["CA"].get_vector()
                        cToRGroup = center - (residue["C"].get_vector() + residue["N"].get_vector() + residue[
                            "O"].get_vector()).__div__(3)
                        oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center, cToRGroup,
                                                       pdbID, chainOrder]
# All-vs-all Euclidean distance matrix between residue centres.
distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 0]))
print(len(oneChain))
print(time.time())
numResidue = len(oneChain)
columns = np.array(list(oneChain.iloc[:, 0]))
n_bigger = 3
target_list = []
for row in range(0, numResidue):
    if row % 50 == 0:
        print(str(row) + "th row")  # coarse progress indicator
    for column in range(0, numResidue):
        coordinatesSubstraction = list(oneChain.loc[row, "Center"] - oneChain.loc[column, "Center"])
        distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))
    # NOTE(review): nlargest picks the FARTHEST residues here, while the main
    # pass above uses nsmallest (nearest) — confirm which is intended.
    row_list = list(distanceMatrix.iloc[row, :])
    result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))
    target_col = columns[result]
    target_list.append(target_col)
print(time.time())
# Row-wise sorted distances, as a plain 2-D array.
sortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)
sortedD = np.array(sortedDistance.tolist())
# get 10 biggest value
sortedD[:, len(oneChain) - 10:]
# get the index 10 biggest value
distanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) - 10:]
# --- Final pass: recompute the per-chain residue table and full distance
# --- matrix for every (PDB, chain) pair (no neighbour extraction here;
# --- distanceMatrix/oneChain are overwritten each iteration).
for eachRow in range(0, len(uniqueList)):
    pdbID = uniqueList.iloc[eachRow, 0]
    chainOrder = uniqueList.iloc[eachRow, 1]
    PDB = PDBList()
    # Download the structure into ../pdb (PDB text format -> pdb<ID>.ent).
    PDB.retrieve_pdb_file(pdb_code=pdbID, pdir="../pdb", file_format="pdb")
    p = PDBParser()
    structure = p.get_structure("X", "../pdb/pdb" + pdbID + ".ent")
    # One row per residue: sequence number, name, centre vector, direction vector.
    oneChain = pd.DataFrame(columns=["Seq", "Residue", "Center", "Direction"])
    if structure.header["resolution"] <= 3.0:  # skip low-resolution structures
        if chainOrder in [x.id for x in list(structure[0].get_chains())]:
            for residue in structure[0][chainOrder]:
                if residue.get_resname() in aminoAcidCodes:
                    if len(list(residue.get_atoms())) > 3:
                        if residue.get_resname() != "GLY":
                            # Centre = centroid of side-chain (non-backbone) atoms;
                            # direction points from that centroid to the CA atom.
                            point = vectors.Vector([0, 0, 0])
                            for atom in residue:
                                if (atom.get_name() not in backbone):
                                    point = point + atom.get_vector()
                            center = point.__div__(len(residue) - 4)
                            cToRGroup = residue["CA"].get_vector() - center
                            oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,
                                                           cToRGroup]
                        else:
                            # Glycine: no side chain, so use CA as the centre and point
                            # away from the backbone (C, N, O) centroid.
                            center = residue["CA"].get_vector()
                            cToRGroup = center - (residue["C"].get_vector() + residue["N"].get_vector() + residue[
                                "O"].get_vector()).__div__(3)
                            oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,
                                                           cToRGroup]
            # All-vs-all Euclidean distances between residue centres, labelled by
            # residue sequence numbers on both axes.
            distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 0]))
            print(time.time())
            numResidue = len(oneChain)
            for row in range(0, numResidue):
                if row % 50 == 0:
                    print(str(row) + "th row")  # coarse progress indicator
                for column in range(0, numResidue):
                    coordinatesSubstraction = list(oneChain.loc[row, "Center"] - oneChain.loc[column, "Center"])
                    distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))
print(time.time())
|
flexible
|
{
"blob_id": "67b1cdfa514aac4fdac3804285ec8d0aebce944d",
"index": 6068,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(len(peptidasesList))\n<mask token>\nfor i in range(len(peptidasesList)):\n if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[\n peptidasesList.loc[i, 'PDB']]:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n else:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,\n 'chain/kegg compound']].append(peptidasesList.loc[i,\n 'resid/chebi id'])\nfor protein in bindingSiteDic:\n for chain in bindingSiteDic[protein]:\n bindingSiteDic[protein][chain] = [int(x) for x in list(set(\n bindingSiteDic[protein][chain]))]\n<mask token>\nuniqueList.reset_index(drop=True).iloc[20:,]\n<mask token>\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n protein_start_time = datetime.now()\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n chain = chainOrder\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n 
else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n columns = np.array(list(oneChain.iloc[:, 0]))\n row_index = oneChain.iloc[:, 0]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nsmallest(n_bigger,\n row_list)))\n target_col = columns[result]\n target_list.append(target_col)\n neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[\n row], str(target_col)]\n protein_end_time = datetime.now()\n print(pdbID, ' Duration: {}'.format(protein_end_time - protein_start_time))\n<mask token>\nprint('The total Duration: {}'.format(end_time - start_time))\nprint(time.time())\n<mask token>\nfor pdbid in uniqueList.iloc[:, 0]:\n exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')\n if not exist:\n PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')\n<mask token>\nif structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = 
residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\n<mask token>\nprint(len(oneChain))\nprint(time.time())\n<mask token>\nfor row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -\n oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x *\n x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))\n target_col = columns[result]\n target_list.append(target_col)\nprint(time.time())\n<mask token>\nsortedD[:, len(oneChain) - 10:]\ndistanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -\n 10:]\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 
4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n print(time.time())\n",
"step-3": "<mask token>\ndir_path = os.getcwd()\npeptidasesList = pd.read_csv('./MCSA_EC3.4_peptidases.csv')\npeptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == 'residue']\npeptidasesList = peptidasesList.reset_index(drop=True)\nprint(len(peptidasesList))\nbindingSiteDic = {}\nfor i in range(len(peptidasesList)):\n if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[\n peptidasesList.loc[i, 'PDB']]:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n else:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,\n 'chain/kegg compound']].append(peptidasesList.loc[i,\n 'resid/chebi id'])\nfor protein in bindingSiteDic:\n for chain in bindingSiteDic[protein]:\n bindingSiteDic[protein][chain] = [int(x) for x in list(set(\n bindingSiteDic[protein][chain]))]\nuniqueList = peptidasesList[['PDB', 'chain/kegg compound']].drop_duplicates()\nuniqueList.reset_index(drop=True).iloc[20:,]\nbackbone = ['N', 'CA', 'C', 'O']\naminoAcidCodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLY', 'GLU',\n 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'PYL', 'SER', 'SEC',\n 'THR', 'TRP', 'TYR', 'TRP', 'VAL']\nneighhor_df = pd.DataFrame(columns=['proteinid', 'chain', 'aaid', 'neighborid']\n )\nn_bigger = 5\ntarget_list = []\nstart_time = datetime.now()\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n protein_start_time = 
datetime.now()\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n chain = chainOrder\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n columns = np.array(list(oneChain.iloc[:, 0]))\n row_index = oneChain.iloc[:, 0]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nsmallest(n_bigger,\n row_list)))\n target_col = columns[result]\n target_list.append(target_col)\n neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[\n row], str(target_col)]\n protein_end_time = datetime.now()\n print(pdbID, ' Duration: {}'.format(protein_end_time - protein_start_time))\nend_time = datetime.now()\nprint('The total Duration: {}'.format(end_time - start_time))\nprint(time.time())\npdbID = uniqueList.iloc[35, 
0]\nchainOrder = uniqueList.iloc[35, 1]\nPDB = PDBList()\nfor pdbid in uniqueList.iloc[:, 0]:\n exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')\n if not exist:\n PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')\np = PDBParser()\nstructure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\noneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction',\n 'pdbid', 'chain'])\nif structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\ndistanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list\n (oneChain.iloc[:, 0]))\nprint(len(oneChain))\nprint(time.time())\nnumResidue = len(oneChain)\ncolumns = np.array(list(oneChain.iloc[:, 0]))\nn_bigger = 3\ntarget_list = []\nfor row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -\n oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x *\n x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, 
heapq.nlargest(n_bigger, row_list)))\n target_col = columns[result]\n target_list.append(target_col)\nprint(time.time())\nsortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)\nsortedD = np.array(sortedDistance.tolist())\nsortedD[:, len(oneChain) - 10:]\ndistanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -\n 10:]\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n 
distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n print(time.time())\n",
"step-4": "from Bio.PDB import *\nimport urllib.request\nimport numpy as np\nimport pandas as pd\nfrom math import sqrt\nimport time\nimport os\nimport heapq\nfrom datetime import datetime\ndir_path = os.getcwd()\npeptidasesList = pd.read_csv('./MCSA_EC3.4_peptidases.csv')\npeptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == 'residue']\npeptidasesList = peptidasesList.reset_index(drop=True)\nprint(len(peptidasesList))\nbindingSiteDic = {}\nfor i in range(len(peptidasesList)):\n if peptidasesList.loc[i, 'PDB'] not in bindingSiteDic:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n elif peptidasesList.loc[i, 'chain/kegg compound'] not in bindingSiteDic[\n peptidasesList.loc[i, 'PDB']]:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']] = {peptidasesList.loc[\n i, 'chain/kegg compound']: [peptidasesList.loc[i,\n 'resid/chebi id']]}\n else:\n bindingSiteDic[peptidasesList.loc[i, 'PDB']][peptidasesList.loc[i,\n 'chain/kegg compound']].append(peptidasesList.loc[i,\n 'resid/chebi id'])\nfor protein in bindingSiteDic:\n for chain in bindingSiteDic[protein]:\n bindingSiteDic[protein][chain] = [int(x) for x in list(set(\n bindingSiteDic[protein][chain]))]\nuniqueList = peptidasesList[['PDB', 'chain/kegg compound']].drop_duplicates()\nuniqueList.reset_index(drop=True).iloc[20:,]\nbackbone = ['N', 'CA', 'C', 'O']\naminoAcidCodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLY', 'GLU',\n 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'PYL', 'SER', 'SEC',\n 'THR', 'TRP', 'TYR', 'TRP', 'VAL']\nneighhor_df = pd.DataFrame(columns=['proteinid', 'chain', 'aaid', 'neighborid']\n )\nn_bigger = 5\ntarget_list = []\nstart_time = datetime.now()\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n 
structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n protein_start_time = datetime.now()\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n chain = chainOrder\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n columns = np.array(list(oneChain.iloc[:, 0]))\n row_index = oneChain.iloc[:, 0]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nsmallest(n_bigger,\n row_list)))\n target_col = columns[result]\n target_list.append(target_col)\n neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[\n row], str(target_col)]\n protein_end_time = datetime.now()\n print(pdbID, ' Duration: {}'.format(protein_end_time - 
protein_start_time))\nend_time = datetime.now()\nprint('The total Duration: {}'.format(end_time - start_time))\nprint(time.time())\npdbID = uniqueList.iloc[35, 0]\nchainOrder = uniqueList.iloc[35, 1]\nPDB = PDBList()\nfor pdbid in uniqueList.iloc[:, 0]:\n exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')\n if not exist:\n PDB.retrieve_pdb_file(pdb_code=pdbid, pdir='../pdb', file_format='pdb')\np = PDBParser()\nstructure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\noneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction',\n 'pdbid', 'chain'])\nif structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1],\n residue.get_resname(), center, cToRGroup, pdbID,\n chainOrder]\ndistanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list\n (oneChain.iloc[:, 0]))\nprint(len(oneChain))\nprint(time.time())\nnumResidue = len(oneChain)\ncolumns = np.array(list(oneChain.iloc[:, 0]))\nn_bigger = 3\ntarget_list = []\nfor row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, 'Center'] -\n oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = 
sqrt(sum(list(map(lambda x: x *\n x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))\n target_col = columns[result]\n target_list.append(target_col)\nprint(time.time())\nsortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)\nsortedD = np.array(sortedDistance.tolist())\nsortedD[:, len(oneChain) - 10:]\ndistanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) -\n 10:]\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir='../pdb', file_format='pdb')\n p = PDBParser()\n structure = p.get_structure('X', '../pdb/pdb' + pdbID + '.ent')\n oneChain = pd.DataFrame(columns=['Seq', 'Residue', 'Center', 'Direction'])\n if structure.header['resolution'] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != 'GLY':\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if atom.get_name() not in backbone:\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue['CA'].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n else:\n center = residue['CA'].get_vector()\n cToRGroup = center - (residue['C'].get_vector() +\n residue['N'].get_vector() + residue['O'].\n get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()\n [1], residue.get_resname(), center, cToRGroup]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]),\n index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + 'th row')\n for column 
in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row,\n 'Center'] - oneChain.loc[column, 'Center'])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda\n x: x * x, coordinatesSubstraction))))\n print(time.time())\n",
"step-5": "from Bio.PDB import *\nimport urllib.request\nimport numpy as np\nimport pandas as pd\nfrom math import sqrt\nimport time\nimport os\nimport heapq\nfrom datetime import datetime\n\ndir_path = os.getcwd()\n\npeptidasesList = pd.read_csv(\"./MCSA_EC3.4_peptidases.csv\")\npeptidasesList = peptidasesList[peptidasesList.iloc[:, 4] == \"residue\"]\n\npeptidasesList = peptidasesList.reset_index(drop=True)\nprint(len(peptidasesList))\n\nbindingSiteDic = {}\nfor i in range(len(peptidasesList)):\n # print(bindingSiteDic)\n if peptidasesList.loc[i, \"PDB\"] not in bindingSiteDic:\n bindingSiteDic[peptidasesList.loc[i, \"PDB\"]] = {\n peptidasesList.loc[i, \"chain/kegg compound\"]: [peptidasesList.loc[i, \"resid/chebi id\"]]}\n elif peptidasesList.loc[i, \"chain/kegg compound\"] not in bindingSiteDic[peptidasesList.loc[i, \"PDB\"]]:\n bindingSiteDic[peptidasesList.loc[i, \"PDB\"]] = {\n peptidasesList.loc[i, \"chain/kegg compound\"]: [peptidasesList.loc[i, \"resid/chebi id\"]]}\n else:\n bindingSiteDic[peptidasesList.loc[i, \"PDB\"]][peptidasesList.loc[i, \"chain/kegg compound\"]].append(\n peptidasesList.loc[i, \"resid/chebi id\"])\nfor protein in bindingSiteDic:\n for chain in bindingSiteDic[protein]:\n bindingSiteDic[protein][chain] = [int(x) for x in list(set(bindingSiteDic[protein][chain]))]\n\nuniqueList = peptidasesList[[\"PDB\", \"chain/kegg compound\"]].drop_duplicates()\n\nuniqueList.reset_index(drop=True).iloc[20:, ]\n\nbackbone = [\"N\", \"CA\", \"C\", \"O\"]\naminoAcidCodes = [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLY\", \"GLU\", \"HIS\", \"ILE\", \"LEU\", \"LYS\",\n \"MET\", \"PHE\", \"PRO\", \"PYL\", \"SER\", \"SEC\", \"THR\", \"TRP\", \"TYR\", \"TRP\", \"VAL\"]\n\nneighhor_df = pd.DataFrame(columns=[\"proteinid\", \"chain\", \"aaid\", \"neighborid\"])\nn_bigger = 5\ntarget_list = []\nstart_time = datetime.now()\n\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 
1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir=\"../pdb\", file_format=\"pdb\")\n p = PDBParser()\n structure = p.get_structure(\"X\", \"../pdb/pdb\" + pdbID + \".ent\")\n oneChain = pd.DataFrame(columns=[\"Seq\", \"Residue\", \"Center\", \"Direction\"])\n\n protein_start_time = datetime.now()\n\n if structure.header[\"resolution\"] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n chain = chainOrder\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != \"GLY\":\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if (atom.get_name() not in backbone):\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue[\"CA\"].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,\n cToRGroup]\n else:\n center = residue[\"CA\"].get_vector()\n cToRGroup = center - (residue[\"C\"].get_vector() + residue[\"N\"].get_vector() + residue[\n \"O\"].get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,\n cToRGroup]\n\n columns = np.array(list(oneChain.iloc[:, 0]))\n row_index = oneChain.iloc[:, 0]\n\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + \"th row\")\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, \"Center\"] - oneChain.loc[column, \"Center\"])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))\n # distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))\n\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nsmallest(n_bigger, 
row_list)))\n target_col = columns[result]\n target_list.append(target_col)\n neighhor_df.loc[len(neighhor_df)] = [pdbID, chain, row_index[row], str(target_col)]\n\n protein_end_time = datetime.now()\n print(pdbID, \" Duration: {}\".format(protein_end_time - protein_start_time))\n\nend_time = datetime.now()\nprint(\"The total Duration: {}\".format(end_time - start_time))\nprint(time.time())\n\npdbID = uniqueList.iloc[35, 0]\nchainOrder = uniqueList.iloc[35, 1]\nPDB = PDBList()\nfor pdbid in uniqueList.iloc[:, 0]:\n exist = os.path.isfile('../pdb/pdb' + pdbID + '.ent')\n if not exist:\n PDB.retrieve_pdb_file(pdb_code=pdbid, pdir=\"../pdb\", file_format=\"pdb\")\n\np = PDBParser()\nstructure = p.get_structure(\"X\", \"../pdb/pdb\" + pdbID + \".ent\")\n\noneChain = pd.DataFrame(columns=[\"Seq\", \"Residue\", \"Center\", \"Direction\", \"pdbid\", \"chain\"])\nif structure.header[\"resolution\"] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]: # Chain information not in pdb file\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes: # Only treat common amino acid\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != \"GLY\": # Glysine as a special case\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if (atom.get_name() not in backbone):\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue[\"CA\"].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center, cToRGroup,\n pdbID, chainOrder]\n else:\n center = residue[\"CA\"].get_vector()\n cToRGroup = center - (residue[\"C\"].get_vector() + residue[\"N\"].get_vector() + residue[\n \"O\"].get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center, cToRGroup,\n pdbID, chainOrder]\n\ndistanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 
0]))\nprint(len(oneChain))\n\nprint(time.time())\nnumResidue = len(oneChain)\ncolumns = np.array(list(oneChain.iloc[:, 0]))\nn_bigger = 3\ntarget_list = []\nfor row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + \"th row\")\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, \"Center\"] - oneChain.loc[column, \"Center\"])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))\n row_list = list(distanceMatrix.iloc[row, :])\n result = list(map(row_list.index, heapq.nlargest(n_bigger, row_list)))\n target_col = columns[result]\n target_list.append(target_col)\n\nprint(time.time())\n\nsortedDistance = distanceMatrix.apply(lambda x: np.sort(x), axis=1)\n\nsortedD = np.array(sortedDistance.tolist())\n# get 10 biggest value\nsortedD[:, len(oneChain) - 10:]\n\n# get the index 10 biggest value\ndistanceMatrix.apply(lambda x: np.argsort(x), axis=1).iloc[:, len(oneChain) - 10:]\n\nfor eachRow in range(0, len(uniqueList)):\n pdbID = uniqueList.iloc[eachRow, 0]\n chainOrder = uniqueList.iloc[eachRow, 1]\n PDB = PDBList()\n PDB.retrieve_pdb_file(pdb_code=pdbID, pdir=\"../pdb\", file_format=\"pdb\")\n p = PDBParser()\n structure = p.get_structure(\"X\", \"../pdb/pdb\" + pdbID + \".ent\")\n oneChain = pd.DataFrame(columns=[\"Seq\", \"Residue\", \"Center\", \"Direction\"])\n if structure.header[\"resolution\"] <= 3.0:\n if chainOrder in [x.id for x in list(structure[0].get_chains())]:\n for residue in structure[0][chainOrder]:\n if residue.get_resname() in aminoAcidCodes:\n if len(list(residue.get_atoms())) > 3:\n if residue.get_resname() != \"GLY\":\n point = vectors.Vector([0, 0, 0])\n for atom in residue:\n if (atom.get_name() not in backbone):\n point = point + atom.get_vector()\n center = point.__div__(len(residue) - 4)\n cToRGroup = residue[\"CA\"].get_vector() - center\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,\n cToRGroup]\n else:\n 
center = residue[\"CA\"].get_vector()\n cToRGroup = center - (residue[\"C\"].get_vector() + residue[\"N\"].get_vector() + residue[\n \"O\"].get_vector()).__div__(3)\n oneChain.loc[len(oneChain)] = [residue.get_id()[1], residue.get_resname(), center,\n cToRGroup]\n distanceMatrix = pd.DataFrame(columns=list(oneChain.iloc[:, 0]), index=list(oneChain.iloc[:, 0]))\n print(time.time())\n numResidue = len(oneChain)\n for row in range(0, numResidue):\n if row % 50 == 0:\n print(str(row) + \"th row\")\n for column in range(0, numResidue):\n coordinatesSubstraction = list(oneChain.loc[row, \"Center\"] - oneChain.loc[column, \"Center\"])\n distanceMatrix.iloc[row, column] = sqrt(sum(list(map(lambda x: x * x, coordinatesSubstraction))))\n print(time.time())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import rospy
from op3_utils.op3_utils import *
from vision import *
import cv2
import sys
import rosnode
# Marker convention: the yellow end of the bar is on the robot's right,
# the red end is on its left.
class States:
    """Labels for the stages of the weightlifting finite-state machine."""

    (INIT, GET_READY, FIND_BAR, WALK_2_BAR, WALK_SIDEWAYS,
     PICK_BAR, WALK_WITH_BAR, LIFT_BAR, WALK_2_FINISH, END) = (
        -1, 1, 2, 3, 4, 5, 6, 7, 8, 99)
# Initialize the ROS node for this competition routine.
rospy.init_node('fira_weightlifting')
# Create the robot helper; the argument is the package name used for lookups.
robot = Robot('fira_weightlifting')
# Block until op3_manager is up: the robot cannot accept control-module or
# walking commands before the manager node is running.
while not rospy.is_shutdown():
    if '/op3_manager' in rosnode.get_node_names():
        rospy.loginfo('Found op3 manager')
        break
    else:
        rospy.loginfo('Waiting for op3 manager')
        rospy.Rate(20).sleep()  # poll for the manager at 20 Hz
# Make sure every publisher has registered to their topic,
# avoiding lost messages
rospy.sleep(4)
# Conversion factor from degrees to radians, used for walking-angle params.
DEGREE2RADIAN = np.pi / 180
def init():
    """One-time setup: reset the pose, open the grippers, enable walking,
    and point the head down toward the floor where the bar will appear."""
    # Use the action module first so the scripted "ready" motion can drive
    # all joints back to the initial position.
    robot.setGeneralControlModule("action_module")
    robot.moveGripper(left=100.0,right=100.0)  # >0 is opened; 100 = fully open
    # Motion page 1: the robot's initial standing position.
    robot.playMotion(1, wait_for_end=True)
    # Publish the default walking parameter set, then switch to the walking
    # module (this only takes over the legs).
    robot.walk_set_param_pub.publish(robot.walking_params[0])
    robot.setGeneralControlModule("walking_module")
    # Detach the head joints from any module so we can command them directly.
    robot.setJointsControlModule(["head_pan", "head_tilt"], ["none", "none"])
    # head_tilt: 0 is looking straight forward, <0 is looking down.
    robot.setJointPos(["head_tilt"], [-0.7])
    rospy.sleep(1.0)  # give the joint command time to take effect
tickrate = 30  # main-loop frequency in Hz
rate = rospy.Rate(tickrate)
currState = States.INIT  # state-machine cursor; starts with one-time setup
cap = cv2.VideoCapture(0)  # default camera (device 0)
current_head_tilt = -0.7  # radians; mirrors the head_tilt value set in init()
# Main control loop: grab a camera frame, extract bar-marker features, then
# run one step of the weightlifting state machine.
while not rospy.is_shutdown():
    ret, frame = cap.read()
    # NOTE(review): this first HSV conversion is discarded — the frame is
    # resized and re-converted two lines below. Harmless but wasted work.
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Work at half resolution to speed up contour detection.
    frame = cv2.resize(frame, (0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_CUBIC)
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Yellow marker = right end of the bar, red marker = left end.
    cnts_yellow = findYellowCnts(hsv_frame)
    cnts_red = findRedCnts(hsv_frame)
    # Vision outputs default to 0 when either marker is missing this frame.
    delta_head = 0  # vertical offset of the bar in the image (head-tilt error)
    delta_lr = 0    # horizontal offset of the bar (steering error)
    bar_slope = 0   # slope of the line joining the two markers (alignment error)
    if (cnts_yellow is not None and cnts_red is not None):
        cx_y, cy_y = findCentroid(cnts_yellow)
        cx_r, cy_r = findCentroid(cnts_red)
        delta_lr = focusCenter(hsv_frame, cx_y, cx_r)
        #print('delta_lr: ' + str(delta_lr))
        delta_head = headTilt(hsv_frame, cy_y, cy_r)
        bar_slope = slope(cx_y, cy_y, cx_r, cy_r)
        # Debug overlay: marker contours, bar midpoint, image center, centroids.
        cv2.drawContours(hsv_frame, cnts_yellow, -1, (255,0,0), 2)
        # NOTE(review): 290 exceeds the 0-255 channel range — probably a typo.
        cv2.drawContours(hsv_frame, cnts_red, -1, (10,235,290), 2)
        cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / 2)),5,(130, 40, 255), -1)
        cv2.circle(hsv_frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)),5,(130, 40, 255), -1)
        cv2.circle(hsv_frame, (cx_y, cy_y),5,(130, 40, 255), -1)
        cv2.circle(hsv_frame, (cx_r, cy_r),5,(130, 40, 255), -1)
    #cv2.imshow('Current view',hsv_frame)
    #cv2.waitKey(33)
    if currState == States.INIT:
        # One-time robot setup, then wait for the operator.
        init()
        currState = States.GET_READY
    elif currState == States.GET_READY:
        print("[GET_READY]")
        # Stay here until the physical 'start' button is pressed.
        if robot.get_pressed_button() == 'start':
            currState = States.FIND_BAR
        #if cv2.waitKey(33) &0xFF == ord('f'):
        #    currState = States.FIND_BAR
    elif currState == States.FIND_BAR:
        print("[FIND_BAR]")
        # Load the approach-walk parameters as walking_params[1] and start walking.
        robot.walking_params.append(robot.loadWalkingParams('param.yaml'))
        robot.setGeneralControlModule("walking_module")
        robot.walking_params[1].x_move_amplitude = 0.005  # slow forward steps
        robot.walking_params[1].balance_enable = False
        robot.walking_params[1].y_move_amplitude = 0.003  # slight lateral bias
        #robot.walking_params[1].angle_move_amplitude = 1.75 * DEGREE2RADIAN
        robot.walk_set_param_pub.publish(robot.walking_params[1])
        rospy.sleep(2)
        robot.walkStart()
        currState = States.WALK_2_BAR
    elif currState == States.WALK_2_BAR:
        print("[WALK_2_BAR]")
        #if(delta_head < -10):
        # Tilt the head further down in proportion to the bar's vertical offset.
        head_tilt_delta = delta_head * 0.01
        current_head_tilt += head_tilt_delta
        current_head_tilt = max(current_head_tilt,-1.2)  # clamp at max downward tilt
        print('current head: {}, head_tilt_delta: {}'.format(current_head_tilt,head_tilt_delta))
        robot.moveHead(None, current_head_tilt)
        print("delta_lr: {}".format(delta_lr))
        # Steer toward the bar: turning amplitude proportional to horizontal error.
        ratio = 1
        angle_delta = delta_lr * ratio
        print("*********************************************")
        robot.walking_params[1].angle_move_amplitude = angle_delta
        robot.walk_set_param_pub.publish(robot.walking_params[1])
        print("angle_move_amp: ", angle_delta)
        '''
        if(delta_lr > 20):
            print("GO LEFT")
            robot.walking_params[1].angle_move_amplitude = angle_delta
            robot.walk_set_param_pub.publish(robot.walking_params[1])
            print("angle_move_amp: ", angle_delta)
        elif(delta_lr < -20):
            print("GO RIGHT")
            robot.walking_params[1].angle_move_amplitude = angle_delta
            robot.walk_set_param_pub.publish(robot.walking_params[1])
            print("angle_move_amp: ", angle_delta)
        else:
            print("GO FORWARD")
            robot.walking_params[1].angle_move_amplitude = 0
            robot.walk_set_param_pub.publish(robot.walking_params[1])
            print("angle_move_amp: ", angle_delta)
        '''
        # Head fully tilted down means the bar is at the robot's feet:
        # stop the walking module and switch to fine online stepping.
        if(current_head_tilt == -1.2):
            robot.walkStop()
            robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08, foot_height=0.05)
            currState = States.WALK_SIDEWAYS
            continue
    elif currState == States.WALK_SIDEWAYS:
        ret, frame = cap.read()
        print("bar_slope: {}".format(bar_slope))
        # NOTE(review): cx_y/cy_y/cx_r/cy_r are only assigned on frames where
        # both markers were detected; if detection fails here this raises
        # NameError or reuses stale values from a previous frame — TODO guard.
        bar_x = (cx_y + cx_r) / 2
        bar_y = (cy_y + cy_r) / 2
        print("bar_location: ({},{})".format(bar_x,bar_y))
        # Error of the bar midpoint relative to the desired pickup position
        # (horizontal center, two thirds of the way down the image).
        x_err = bar_x - hsv_frame.shape[1] / 2
        y_err = bar_y - hsv_frame.shape[0] *2 / 3
        print("bar_error: ({},{})".format(x_err,y_err))
        '''
        if y_err > 20:
            print('back')
            robot.onlineWalkCommand(direction="backward", start_leg="right", step_num=2,
                                    front_length=0.02, step_time=0.5)
            rospy.sleep(2)
        '''
        # Rotate in place until the bar is perpendicular to the camera axis.
        if bar_slope <= -0.07:
            print('turn left')
            robot.onlineWalkCommand(direction="turn_left", start_leg="left", step_num=2,
                                    front_length=0.0, step_angle=10.0,step_time=0.4)
            rospy.sleep(2)
        elif bar_slope > 0.07:
            print('turn right')
            robot.onlineWalkCommand(direction="turn_right", start_leg="right", step_num=2,
                                    front_length=0.0, step_angle=10.0,step_time=0.4)
            rospy.sleep(2)
            '''
        elif x_err > 30:
            print('shift right')
            robot.onlineWalkCommand(direction="right", start_leg="right", step_num=2,
                                    side_length=0.01, step_time=0.4)
            rospy.sleep(2.5)
        elif x_err < -30:
            print('shift left')
            robot.onlineWalkCommand(direction="left", start_leg="left", step_num=2,
                                    side_length=0.01, step_time=0.4)
            rospy.sleep(2.5)
        elif y_err < -20:
            print('forward')
            robot.onlineWalkCommand(direction="forward", start_leg="right", step_num=2,
                                    front_length=0.02, step_time=0.4)
            rospy.sleep(2)
            '''
        else:
            # Bar is aligned: proceed to pick it up.
            print('success!!!')
            # TODO removed sleep here
            #rospy.sleep(6)
            currState = States.PICK_BAR
            # Drain the camera's frame buffer so the next state sees a
            # fresh image instead of stale buffered frames.
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            ret, frame = cap.read()
            '''
        print("[WALK_SIDEWAYS]")
        print("bar_slope: {}".format(bar_slope))
        if(bar_slope > 0.1):
            print("Turn facing right")
            robot.walking_params[1].x_move_amplitude = 0
            robot.walking_params[1].y_move_amplitude = -0.01
            robot.walk_set_param_pub.publish(robot.walking_params[1])
            rospy.sleep(2)
            robot.walkStart()
            rospy.sleep(2)
            robot.walkStop()
        elif(bar_slope < -0.1):
            print("Turn facing left")
            robot.walking_params[1].x_move_amplitude = 0
            robot.walking_params[1].y_move_amplitude = 0.01
            robot.walk_set_param_pub.publish(robot.walking_params[1])
            rospy.sleep(2)
            robot.walkStart()
            rospy.sleep(2)
            robot.walkStop()
        else:
            print("Keep facing forward")
            currState = States.PICK_BAR
            '''
    elif currState == States.PICK_BAR:
        rospy.loginfo("[PICK_BAR]")
        # TODO testing
        #rospy.sleep(2)
        # Release all modules, then use the action module to play the
        # scripted pickup motion pages (86, 87, then 90 after gripping) —
        # pages are defined in the OP3 action editor.
        robot.setGeneralControlModule("none")
        rospy.sleep(2)
        robot.setGeneralControlModule("action_module")
        robot.playMotion(86, wait_for_end=True)
        robot.playMotion(87, wait_for_end=True)
        rospy.sleep(1.0)
        # Close the grippers gradually around the bar.
        robot.moveGripper(left=40.0,right=40.0)
        rospy.sleep(0.5)
        robot.moveGripper(left=20.0,right=20.0)
        rospy.sleep(1.0)
        robot.playMotion(90, wait_for_end=True)
        rospy.sleep(1.0)
        currState = States.WALK_WITH_BAR
    elif currState == States.WALK_WITH_BAR:
        print("[WALK_WITH_BAR]")
        # Load bar-carrying walk parameters as walking_params[2].
        robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))
        #robot.walking_params[2].hip_pitch_offset = -5
        robot.walking_params[2].x_move_amplitude = 0.005
        robot.walking_params[2].y_move_amplitude = 0.000
        #TODO change the a move amplitude to 1
        robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN
        robot.walk_set_param_pub.publish(robot.walking_params[2])
        # Put only the leg joints under the walking module so the arms keep
        # holding the bar while the robot walks.
        robot.setJointsControlModule(["r_hip_yaw","l_hip_yaw","r_hip_roll","l_hip_roll","r_hip_pitch",
            "l_hip_pitch","r_knee","l_knee","r_ank_pitch","l_ank_pitch","r_ank_roll","l_ank_roll"],
            ["walking_module"])
        print(robot.walking_params[2])
        rospy.sleep(3)
        robot.walkStart()
        rospy.sleep(3)
        robot.moveGripper(left=15.0,right=15.0)  # tighten the grip mid-walk
        rospy.sleep(9)  # walk forward with the bar for ~9 s
        robot.walkStop()
        currState = States.LIFT_BAR
    elif currState == States.LIFT_BAR:
        print("[LIFT_BAR]")
        robot.setGeneralControlModule("none")
        robot.setGeneralControlModule("action_module")
        # Motion page 89 — presumably the overhead lift; defined in the
        # action editor. TODO confirm.
        robot.playMotion(89, wait_for_end=True)
        # Free the head joints and tilt the head up.
        robot.setJointsControlModule(['head_pan', 'head_tilt'],['none','none'])
        robot.moveHead(0,1.5)
        currState = States.WALK_2_FINISH
    elif currState == States.WALK_2_FINISH:
        print("WALK_2_FINISH")
        # Load fresh carry parameters as walking_params[3] for the final walk.
        robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))
        robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN #1.5
        robot.walking_params[3].x_move_amplitude = 0  # step in place first
        robot.walking_params[3].balance_enable = True
        robot.walk_set_param_pub.publish(robot.walking_params[3])
        # Legs only: arms keep holding the lifted bar.
        robot.setJointsControlModule(["r_hip_yaw","l_hip_yaw","r_hip_roll","l_hip_roll","r_hip_pitch",
            "l_hip_pitch","r_knee","l_knee","r_ank_pitch","l_ank_pitch","r_ank_roll","l_ank_roll"],
            ["walking_module"])
        rospy.sleep(5)
        robot.walkStart()
        rospy.sleep(3)
        # After stabilizing, start moving forward.
        robot.walking_params[3].x_move_amplitude = 0.005
        robot.walk_set_param_pub.publish(robot.walking_params[3])
        # NOTE(review): 1117 s (~18.6 min) looks like a typo for a much
        # shorter duration — confirm intended walk time.
        rospy.sleep(1117)
        robot.walkStop()
        currState = States.END
        rate.sleep()
    elif currState == States.END:
        # Terminal state: nothing left to do; keep idling at the loop rate.
        print("[END]")
        #robot.walkStop()
    rate.sleep()
|
normal
|
{
"blob_id": "b3a2db38e2074b02c8837bfce85d06598a7b194d",
"index": 5701,
"step-1": "<mask token>\n\n\nclass States:\n INIT = -1\n GET_READY = 1\n FIND_BAR = 2\n WALK_2_BAR = 3\n WALK_SIDEWAYS = 4\n PICK_BAR = 5\n WALK_WITH_BAR = 6\n LIFT_BAR = 7\n WALK_2_FINISH = 8\n END = 99\n\n\n<mask token>\n\n\ndef init():\n robot.setGeneralControlModule('action_module')\n robot.moveGripper(left=100.0, right=100.0)\n robot.playMotion(1, wait_for_end=True)\n robot.walk_set_param_pub.publish(robot.walking_params[0])\n robot.setGeneralControlModule('walking_module')\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none', 'none'])\n robot.setJointPos(['head_tilt'], [-0.7])\n rospy.sleep(1.0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass States:\n INIT = -1\n GET_READY = 1\n FIND_BAR = 2\n WALK_2_BAR = 3\n WALK_SIDEWAYS = 4\n PICK_BAR = 5\n WALK_WITH_BAR = 6\n LIFT_BAR = 7\n WALK_2_FINISH = 8\n END = 99\n\n\nrospy.init_node('fira_weightlifting')\n<mask token>\nwhile not rospy.is_shutdown():\n if '/op3_manager' in rosnode.get_node_names():\n rospy.loginfo('Found op3 manager')\n break\n else:\n rospy.loginfo('Waiting for op3 manager')\n rospy.Rate(20).sleep()\nrospy.sleep(4)\n<mask token>\n\n\ndef init():\n robot.setGeneralControlModule('action_module')\n robot.moveGripper(left=100.0, right=100.0)\n robot.playMotion(1, wait_for_end=True)\n robot.walk_set_param_pub.publish(robot.walking_params[0])\n robot.setGeneralControlModule('walking_module')\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none', 'none'])\n robot.setJointPos(['head_tilt'], [-0.7])\n rospy.sleep(1.0)\n\n\n<mask token>\nwhile not rospy.is_shutdown():\n ret, frame = cap.read()\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.\n INTER_CUBIC)\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n cnts_yellow = findYellowCnts(hsv_frame)\n cnts_red = findRedCnts(hsv_frame)\n delta_head = 0\n delta_lr = 0\n bar_slope = 0\n if cnts_yellow is not None and cnts_red is not None:\n cx_y, cy_y = findCentroid(cnts_yellow)\n cx_r, cy_r = findCentroid(cnts_red)\n delta_lr = focusCenter(hsv_frame, cx_y, cx_r)\n delta_head = headTilt(hsv_frame, cy_y, cy_r)\n bar_slope = slope(cx_y, cy_y, cx_r, cy_r)\n cv2.drawContours(hsv_frame, cnts_yellow, -1, (255, 0, 0), 2)\n cv2.drawContours(hsv_frame, cnts_red, -1, (10, 235, 290), 2)\n cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / \n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (int(frame.shape[1] / 2), int(frame.shape[0] /\n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_y, cy_y), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_r, cy_r), 5, (130, 40, 
255), -1)\n if currState == States.INIT:\n init()\n currState = States.GET_READY\n elif currState == States.GET_READY:\n print('[GET_READY]')\n if robot.get_pressed_button() == 'start':\n currState = States.FIND_BAR\n elif currState == States.FIND_BAR:\n print('[FIND_BAR]')\n robot.walking_params.append(robot.loadWalkingParams('param.yaml'))\n robot.setGeneralControlModule('walking_module')\n robot.walking_params[1].x_move_amplitude = 0.005\n robot.walking_params[1].balance_enable = False\n robot.walking_params[1].y_move_amplitude = 0.003\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n currState = States.WALK_2_BAR\n elif currState == States.WALK_2_BAR:\n print('[WALK_2_BAR]')\n head_tilt_delta = delta_head * 0.01\n current_head_tilt += head_tilt_delta\n current_head_tilt = max(current_head_tilt, -1.2)\n print('current head: {}, head_tilt_delta: {}'.format(\n current_head_tilt, head_tilt_delta))\n robot.moveHead(None, current_head_tilt)\n print('delta_lr: {}'.format(delta_lr))\n ratio = 1\n angle_delta = delta_lr * ratio\n print('*********************************************')\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print('angle_move_amp: ', angle_delta)\n \"\"\"\n if(delta_lr > 20):\n print(\"GO LEFT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n elif(delta_lr < -20):\n print(\"GO RIGHT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n else:\n print(\"GO FORWARD\")\n robot.walking_params[1].angle_move_amplitude = 0\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta) \n \"\"\"\n if current_head_tilt == -1.2:\n robot.walkStop()\n 
robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08,\n foot_height=0.05)\n currState = States.WALK_SIDEWAYS\n continue\n elif currState == States.WALK_SIDEWAYS:\n ret, frame = cap.read()\n print('bar_slope: {}'.format(bar_slope))\n bar_x = (cx_y + cx_r) / 2\n bar_y = (cy_y + cy_r) / 2\n print('bar_location: ({},{})'.format(bar_x, bar_y))\n x_err = bar_x - hsv_frame.shape[1] / 2\n y_err = bar_y - hsv_frame.shape[0] * 2 / 3\n print('bar_error: ({},{})'.format(x_err, y_err))\n \"\"\"\n if y_err > 20:\n print('back')\n robot.onlineWalkCommand(direction=\"backward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.5)\n rospy.sleep(2)\n \"\"\"\n if bar_slope <= -0.07:\n print('turn left')\n robot.onlineWalkCommand(direction='turn_left', start_leg='left',\n step_num=2, front_length=0.0, step_angle=10.0, step_time=0.4)\n rospy.sleep(2)\n elif bar_slope > 0.07:\n print('turn right')\n robot.onlineWalkCommand(direction='turn_right', start_leg=\n 'right', step_num=2, front_length=0.0, step_angle=10.0,\n step_time=0.4)\n rospy.sleep(2)\n \"\"\" \n elif x_err > 30:\n print('shift right')\n robot.onlineWalkCommand(direction=\"right\", start_leg=\"right\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif x_err < -30:\n print('shift left')\n robot.onlineWalkCommand(direction=\"left\", start_leg=\"left\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif y_err < -20:\n print('forward')\n robot.onlineWalkCommand(direction=\"forward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.4)\n rospy.sleep(2)\n \"\"\"\n else:\n print('success!!!')\n currState = States.PICK_BAR\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n \"\"\"\n 
print(\"[WALK_SIDEWAYS]\")\n print(\"bar_slope: {}\".format(bar_slope))\n \n if(bar_slope > 0.1):\n print(\"Turn facing right\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = -0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n \n elif(bar_slope < -0.1):\n print(\"Turn facing left\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = 0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n else:\n print(\"Keep facing forward\")\n \n currState = States.PICK_BAR\n \"\"\"\n elif currState == States.PICK_BAR:\n rospy.loginfo('[PICK_BAR]')\n robot.setGeneralControlModule('none')\n rospy.sleep(2)\n robot.setGeneralControlModule('action_module')\n robot.playMotion(86, wait_for_end=True)\n robot.playMotion(87, wait_for_end=True)\n rospy.sleep(1.0)\n robot.moveGripper(left=40.0, right=40.0)\n rospy.sleep(0.5)\n robot.moveGripper(left=20.0, right=20.0)\n rospy.sleep(1.0)\n robot.playMotion(90, wait_for_end=True)\n rospy.sleep(1.0)\n currState = States.WALK_WITH_BAR\n elif currState == States.WALK_WITH_BAR:\n print('[WALK_WITH_BAR]')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[2].x_move_amplitude = 0.005\n robot.walking_params[2].y_move_amplitude = 0.0\n robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN\n robot.walk_set_param_pub.publish(robot.walking_params[2])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n print(robot.walking_params[2])\n rospy.sleep(3)\n robot.walkStart()\n rospy.sleep(3)\n robot.moveGripper(left=15.0, right=15.0)\n rospy.sleep(9)\n robot.walkStop()\n 
currState = States.LIFT_BAR\n elif currState == States.LIFT_BAR:\n print('[LIFT_BAR]')\n robot.setGeneralControlModule('none')\n robot.setGeneralControlModule('action_module')\n robot.playMotion(89, wait_for_end=True)\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none',\n 'none'])\n robot.moveHead(0, 1.5)\n currState = States.WALK_2_FINISH\n elif currState == States.WALK_2_FINISH:\n print('WALK_2_FINISH')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN\n robot.walking_params[3].x_move_amplitude = 0\n robot.walking_params[3].balance_enable = True\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n rospy.sleep(5)\n robot.walkStart()\n rospy.sleep(3)\n robot.walking_params[3].x_move_amplitude = 0.005\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n rospy.sleep(1117)\n robot.walkStop()\n currState = States.END\n rate.sleep()\n elif currState == States.END:\n print('[END]')\n rate.sleep()\n",
"step-3": "<mask token>\n\n\nclass States:\n INIT = -1\n GET_READY = 1\n FIND_BAR = 2\n WALK_2_BAR = 3\n WALK_SIDEWAYS = 4\n PICK_BAR = 5\n WALK_WITH_BAR = 6\n LIFT_BAR = 7\n WALK_2_FINISH = 8\n END = 99\n\n\nrospy.init_node('fira_weightlifting')\nrobot = Robot('fira_weightlifting')\nwhile not rospy.is_shutdown():\n if '/op3_manager' in rosnode.get_node_names():\n rospy.loginfo('Found op3 manager')\n break\n else:\n rospy.loginfo('Waiting for op3 manager')\n rospy.Rate(20).sleep()\nrospy.sleep(4)\nDEGREE2RADIAN = np.pi / 180\n\n\ndef init():\n robot.setGeneralControlModule('action_module')\n robot.moveGripper(left=100.0, right=100.0)\n robot.playMotion(1, wait_for_end=True)\n robot.walk_set_param_pub.publish(robot.walking_params[0])\n robot.setGeneralControlModule('walking_module')\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none', 'none'])\n robot.setJointPos(['head_tilt'], [-0.7])\n rospy.sleep(1.0)\n\n\ntickrate = 30\nrate = rospy.Rate(tickrate)\ncurrState = States.INIT\ncap = cv2.VideoCapture(0)\ncurrent_head_tilt = -0.7\nwhile not rospy.is_shutdown():\n ret, frame = cap.read()\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.\n INTER_CUBIC)\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n cnts_yellow = findYellowCnts(hsv_frame)\n cnts_red = findRedCnts(hsv_frame)\n delta_head = 0\n delta_lr = 0\n bar_slope = 0\n if cnts_yellow is not None and cnts_red is not None:\n cx_y, cy_y = findCentroid(cnts_yellow)\n cx_r, cy_r = findCentroid(cnts_red)\n delta_lr = focusCenter(hsv_frame, cx_y, cx_r)\n delta_head = headTilt(hsv_frame, cy_y, cy_r)\n bar_slope = slope(cx_y, cy_y, cx_r, cy_r)\n cv2.drawContours(hsv_frame, cnts_yellow, -1, (255, 0, 0), 2)\n cv2.drawContours(hsv_frame, cnts_red, -1, (10, 235, 290), 2)\n cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / \n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (int(frame.shape[1] / 2), 
int(frame.shape[0] /\n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_y, cy_y), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_r, cy_r), 5, (130, 40, 255), -1)\n if currState == States.INIT:\n init()\n currState = States.GET_READY\n elif currState == States.GET_READY:\n print('[GET_READY]')\n if robot.get_pressed_button() == 'start':\n currState = States.FIND_BAR\n elif currState == States.FIND_BAR:\n print('[FIND_BAR]')\n robot.walking_params.append(robot.loadWalkingParams('param.yaml'))\n robot.setGeneralControlModule('walking_module')\n robot.walking_params[1].x_move_amplitude = 0.005\n robot.walking_params[1].balance_enable = False\n robot.walking_params[1].y_move_amplitude = 0.003\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n currState = States.WALK_2_BAR\n elif currState == States.WALK_2_BAR:\n print('[WALK_2_BAR]')\n head_tilt_delta = delta_head * 0.01\n current_head_tilt += head_tilt_delta\n current_head_tilt = max(current_head_tilt, -1.2)\n print('current head: {}, head_tilt_delta: {}'.format(\n current_head_tilt, head_tilt_delta))\n robot.moveHead(None, current_head_tilt)\n print('delta_lr: {}'.format(delta_lr))\n ratio = 1\n angle_delta = delta_lr * ratio\n print('*********************************************')\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print('angle_move_amp: ', angle_delta)\n \"\"\"\n if(delta_lr > 20):\n print(\"GO LEFT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n elif(delta_lr < -20):\n print(\"GO RIGHT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n else:\n print(\"GO FORWARD\")\n robot.walking_params[1].angle_move_amplitude = 0\n 
robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta) \n \"\"\"\n if current_head_tilt == -1.2:\n robot.walkStop()\n robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08,\n foot_height=0.05)\n currState = States.WALK_SIDEWAYS\n continue\n elif currState == States.WALK_SIDEWAYS:\n ret, frame = cap.read()\n print('bar_slope: {}'.format(bar_slope))\n bar_x = (cx_y + cx_r) / 2\n bar_y = (cy_y + cy_r) / 2\n print('bar_location: ({},{})'.format(bar_x, bar_y))\n x_err = bar_x - hsv_frame.shape[1] / 2\n y_err = bar_y - hsv_frame.shape[0] * 2 / 3\n print('bar_error: ({},{})'.format(x_err, y_err))\n \"\"\"\n if y_err > 20:\n print('back')\n robot.onlineWalkCommand(direction=\"backward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.5)\n rospy.sleep(2)\n \"\"\"\n if bar_slope <= -0.07:\n print('turn left')\n robot.onlineWalkCommand(direction='turn_left', start_leg='left',\n step_num=2, front_length=0.0, step_angle=10.0, step_time=0.4)\n rospy.sleep(2)\n elif bar_slope > 0.07:\n print('turn right')\n robot.onlineWalkCommand(direction='turn_right', start_leg=\n 'right', step_num=2, front_length=0.0, step_angle=10.0,\n step_time=0.4)\n rospy.sleep(2)\n \"\"\" \n elif x_err > 30:\n print('shift right')\n robot.onlineWalkCommand(direction=\"right\", start_leg=\"right\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif x_err < -30:\n print('shift left')\n robot.onlineWalkCommand(direction=\"left\", start_leg=\"left\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif y_err < -20:\n print('forward')\n robot.onlineWalkCommand(direction=\"forward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.4)\n rospy.sleep(2)\n \"\"\"\n else:\n print('success!!!')\n currState = States.PICK_BAR\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, 
frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n \"\"\"\n print(\"[WALK_SIDEWAYS]\")\n print(\"bar_slope: {}\".format(bar_slope))\n \n if(bar_slope > 0.1):\n print(\"Turn facing right\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = -0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n \n elif(bar_slope < -0.1):\n print(\"Turn facing left\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = 0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n else:\n print(\"Keep facing forward\")\n \n currState = States.PICK_BAR\n \"\"\"\n elif currState == States.PICK_BAR:\n rospy.loginfo('[PICK_BAR]')\n robot.setGeneralControlModule('none')\n rospy.sleep(2)\n robot.setGeneralControlModule('action_module')\n robot.playMotion(86, wait_for_end=True)\n robot.playMotion(87, wait_for_end=True)\n rospy.sleep(1.0)\n robot.moveGripper(left=40.0, right=40.0)\n rospy.sleep(0.5)\n robot.moveGripper(left=20.0, right=20.0)\n rospy.sleep(1.0)\n robot.playMotion(90, wait_for_end=True)\n rospy.sleep(1.0)\n currState = States.WALK_WITH_BAR\n elif currState == States.WALK_WITH_BAR:\n print('[WALK_WITH_BAR]')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[2].x_move_amplitude = 0.005\n robot.walking_params[2].y_move_amplitude = 0.0\n robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN\n robot.walk_set_param_pub.publish(robot.walking_params[2])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n 
print(robot.walking_params[2])\n rospy.sleep(3)\n robot.walkStart()\n rospy.sleep(3)\n robot.moveGripper(left=15.0, right=15.0)\n rospy.sleep(9)\n robot.walkStop()\n currState = States.LIFT_BAR\n elif currState == States.LIFT_BAR:\n print('[LIFT_BAR]')\n robot.setGeneralControlModule('none')\n robot.setGeneralControlModule('action_module')\n robot.playMotion(89, wait_for_end=True)\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none',\n 'none'])\n robot.moveHead(0, 1.5)\n currState = States.WALK_2_FINISH\n elif currState == States.WALK_2_FINISH:\n print('WALK_2_FINISH')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN\n robot.walking_params[3].x_move_amplitude = 0\n robot.walking_params[3].balance_enable = True\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n rospy.sleep(5)\n robot.walkStart()\n rospy.sleep(3)\n robot.walking_params[3].x_move_amplitude = 0.005\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n rospy.sleep(1117)\n robot.walkStop()\n currState = States.END\n rate.sleep()\n elif currState == States.END:\n print('[END]')\n rate.sleep()\n",
"step-4": "import rospy\nfrom op3_utils.op3_utils import *\nfrom vision import *\nimport cv2\nimport sys\nimport rosnode\n\n\nclass States:\n INIT = -1\n GET_READY = 1\n FIND_BAR = 2\n WALK_2_BAR = 3\n WALK_SIDEWAYS = 4\n PICK_BAR = 5\n WALK_WITH_BAR = 6\n LIFT_BAR = 7\n WALK_2_FINISH = 8\n END = 99\n\n\nrospy.init_node('fira_weightlifting')\nrobot = Robot('fira_weightlifting')\nwhile not rospy.is_shutdown():\n if '/op3_manager' in rosnode.get_node_names():\n rospy.loginfo('Found op3 manager')\n break\n else:\n rospy.loginfo('Waiting for op3 manager')\n rospy.Rate(20).sleep()\nrospy.sleep(4)\nDEGREE2RADIAN = np.pi / 180\n\n\ndef init():\n robot.setGeneralControlModule('action_module')\n robot.moveGripper(left=100.0, right=100.0)\n robot.playMotion(1, wait_for_end=True)\n robot.walk_set_param_pub.publish(robot.walking_params[0])\n robot.setGeneralControlModule('walking_module')\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none', 'none'])\n robot.setJointPos(['head_tilt'], [-0.7])\n rospy.sleep(1.0)\n\n\ntickrate = 30\nrate = rospy.Rate(tickrate)\ncurrState = States.INIT\ncap = cv2.VideoCapture(0)\ncurrent_head_tilt = -0.7\nwhile not rospy.is_shutdown():\n ret, frame = cap.read()\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.\n INTER_CUBIC)\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n cnts_yellow = findYellowCnts(hsv_frame)\n cnts_red = findRedCnts(hsv_frame)\n delta_head = 0\n delta_lr = 0\n bar_slope = 0\n if cnts_yellow is not None and cnts_red is not None:\n cx_y, cy_y = findCentroid(cnts_yellow)\n cx_r, cy_r = findCentroid(cnts_red)\n delta_lr = focusCenter(hsv_frame, cx_y, cx_r)\n delta_head = headTilt(hsv_frame, cy_y, cy_r)\n bar_slope = slope(cx_y, cy_y, cx_r, cy_r)\n cv2.drawContours(hsv_frame, cnts_yellow, -1, (255, 0, 0), 2)\n cv2.drawContours(hsv_frame, cnts_red, -1, (10, 235, 290), 2)\n cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / 
\n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (int(frame.shape[1] / 2), int(frame.shape[0] /\n 2)), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_y, cy_y), 5, (130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_r, cy_r), 5, (130, 40, 255), -1)\n if currState == States.INIT:\n init()\n currState = States.GET_READY\n elif currState == States.GET_READY:\n print('[GET_READY]')\n if robot.get_pressed_button() == 'start':\n currState = States.FIND_BAR\n elif currState == States.FIND_BAR:\n print('[FIND_BAR]')\n robot.walking_params.append(robot.loadWalkingParams('param.yaml'))\n robot.setGeneralControlModule('walking_module')\n robot.walking_params[1].x_move_amplitude = 0.005\n robot.walking_params[1].balance_enable = False\n robot.walking_params[1].y_move_amplitude = 0.003\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n currState = States.WALK_2_BAR\n elif currState == States.WALK_2_BAR:\n print('[WALK_2_BAR]')\n head_tilt_delta = delta_head * 0.01\n current_head_tilt += head_tilt_delta\n current_head_tilt = max(current_head_tilt, -1.2)\n print('current head: {}, head_tilt_delta: {}'.format(\n current_head_tilt, head_tilt_delta))\n robot.moveHead(None, current_head_tilt)\n print('delta_lr: {}'.format(delta_lr))\n ratio = 1\n angle_delta = delta_lr * ratio\n print('*********************************************')\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print('angle_move_amp: ', angle_delta)\n \"\"\"\n if(delta_lr > 20):\n print(\"GO LEFT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n elif(delta_lr < -20):\n print(\"GO RIGHT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n else:\n 
print(\"GO FORWARD\")\n robot.walking_params[1].angle_move_amplitude = 0\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta) \n \"\"\"\n if current_head_tilt == -1.2:\n robot.walkStop()\n robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08,\n foot_height=0.05)\n currState = States.WALK_SIDEWAYS\n continue\n elif currState == States.WALK_SIDEWAYS:\n ret, frame = cap.read()\n print('bar_slope: {}'.format(bar_slope))\n bar_x = (cx_y + cx_r) / 2\n bar_y = (cy_y + cy_r) / 2\n print('bar_location: ({},{})'.format(bar_x, bar_y))\n x_err = bar_x - hsv_frame.shape[1] / 2\n y_err = bar_y - hsv_frame.shape[0] * 2 / 3\n print('bar_error: ({},{})'.format(x_err, y_err))\n \"\"\"\n if y_err > 20:\n print('back')\n robot.onlineWalkCommand(direction=\"backward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.5)\n rospy.sleep(2)\n \"\"\"\n if bar_slope <= -0.07:\n print('turn left')\n robot.onlineWalkCommand(direction='turn_left', start_leg='left',\n step_num=2, front_length=0.0, step_angle=10.0, step_time=0.4)\n rospy.sleep(2)\n elif bar_slope > 0.07:\n print('turn right')\n robot.onlineWalkCommand(direction='turn_right', start_leg=\n 'right', step_num=2, front_length=0.0, step_angle=10.0,\n step_time=0.4)\n rospy.sleep(2)\n \"\"\" \n elif x_err > 30:\n print('shift right')\n robot.onlineWalkCommand(direction=\"right\", start_leg=\"right\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif x_err < -30:\n print('shift left')\n robot.onlineWalkCommand(direction=\"left\", start_leg=\"left\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif y_err < -20:\n print('forward')\n robot.onlineWalkCommand(direction=\"forward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.4)\n rospy.sleep(2)\n \"\"\"\n else:\n print('success!!!')\n currState = States.PICK_BAR\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame 
= cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n \"\"\"\n print(\"[WALK_SIDEWAYS]\")\n print(\"bar_slope: {}\".format(bar_slope))\n \n if(bar_slope > 0.1):\n print(\"Turn facing right\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = -0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n \n elif(bar_slope < -0.1):\n print(\"Turn facing left\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = 0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n else:\n print(\"Keep facing forward\")\n \n currState = States.PICK_BAR\n \"\"\"\n elif currState == States.PICK_BAR:\n rospy.loginfo('[PICK_BAR]')\n robot.setGeneralControlModule('none')\n rospy.sleep(2)\n robot.setGeneralControlModule('action_module')\n robot.playMotion(86, wait_for_end=True)\n robot.playMotion(87, wait_for_end=True)\n rospy.sleep(1.0)\n robot.moveGripper(left=40.0, right=40.0)\n rospy.sleep(0.5)\n robot.moveGripper(left=20.0, right=20.0)\n rospy.sleep(1.0)\n robot.playMotion(90, wait_for_end=True)\n rospy.sleep(1.0)\n currState = States.WALK_WITH_BAR\n elif currState == States.WALK_WITH_BAR:\n print('[WALK_WITH_BAR]')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[2].x_move_amplitude = 0.005\n robot.walking_params[2].y_move_amplitude = 0.0\n robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN\n robot.walk_set_param_pub.publish(robot.walking_params[2])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 
'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n print(robot.walking_params[2])\n rospy.sleep(3)\n robot.walkStart()\n rospy.sleep(3)\n robot.moveGripper(left=15.0, right=15.0)\n rospy.sleep(9)\n robot.walkStop()\n currState = States.LIFT_BAR\n elif currState == States.LIFT_BAR:\n print('[LIFT_BAR]')\n robot.setGeneralControlModule('none')\n robot.setGeneralControlModule('action_module')\n robot.playMotion(89, wait_for_end=True)\n robot.setJointsControlModule(['head_pan', 'head_tilt'], ['none',\n 'none'])\n robot.moveHead(0, 1.5)\n currState = States.WALK_2_FINISH\n elif currState == States.WALK_2_FINISH:\n print('WALK_2_FINISH')\n robot.walking_params.append(robot.loadWalkingParams(\n 'pickup_param.yaml'))\n robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN\n robot.walking_params[3].x_move_amplitude = 0\n robot.walking_params[3].balance_enable = True\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n robot.setJointsControlModule(['r_hip_yaw', 'l_hip_yaw',\n 'r_hip_roll', 'l_hip_roll', 'r_hip_pitch', 'l_hip_pitch',\n 'r_knee', 'l_knee', 'r_ank_pitch', 'l_ank_pitch', 'r_ank_roll',\n 'l_ank_roll'], ['walking_module'])\n rospy.sleep(5)\n robot.walkStart()\n rospy.sleep(3)\n robot.walking_params[3].x_move_amplitude = 0.005\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n rospy.sleep(1117)\n robot.walkStop()\n currState = States.END\n rate.sleep()\n elif currState == States.END:\n print('[END]')\n rate.sleep()\n",
"step-5": "#!/usr/bin/env python\n\nimport rospy\nfrom op3_utils.op3_utils import *\nfrom vision import *\nimport cv2\nimport sys\nimport rosnode\n\n#Yellow >> Right\n#Red >> Left\n\nclass States:\n INIT = -1\n GET_READY = 1\n FIND_BAR = 2\n WALK_2_BAR = 3\n WALK_SIDEWAYS = 4\n PICK_BAR = 5\n WALK_WITH_BAR = 6\n LIFT_BAR = 7\n WALK_2_FINISH = 8\n END = 99\n\n# Iinitialize Node\nrospy.init_node('fira_weightlifting')\n\n\n# Create robot ('package_name')\nrobot = Robot('fira_weightlifting')\n\n\nwhile not rospy.is_shutdown():\n if '/op3_manager' in rosnode.get_node_names():\n rospy.loginfo('Found op3 manager')\n break\n else:\n rospy.loginfo('Waiting for op3 manager')\n rospy.Rate(20).sleep()\n\n\n\n# Make sure every publisher has registered to their topic,\n# avoiding lost messages\nrospy.sleep(4) \n\nDEGREE2RADIAN = np.pi / 180\n\ndef init():\n # Set ctrl modules of all actions to joint, so we can reset robot position\n robot.setGeneralControlModule(\"action_module\")\n \n robot.moveGripper(left=100.0,right=100.0)\n #robot.setGrippersPos(left=0.0, right=0.0)\n # >0 is opened\n \n # Call initial robot position\n robot.playMotion(1, wait_for_end=True)\n\n # Set ctrl module to walking, this actually only sets the legs\n robot.walk_set_param_pub.publish(robot.walking_params[0])\n robot.setGeneralControlModule(\"walking_module\")\n \n # Set joint modules of head joints to none so we can control them directly\n robot.setJointsControlModule([\"head_pan\", \"head_tilt\"], [\"none\", \"none\"])\n \n \n robot.setJointPos([\"head_tilt\"], [-0.7])\n #0 is looking straight forward, <0 is looking down\n\n rospy.sleep(1.0)\n\ntickrate = 30\nrate = rospy.Rate(tickrate)\n\ncurrState = States.INIT\n\ncap = cv2.VideoCapture(0)\n\ncurrent_head_tilt = -0.7\nwhile not rospy.is_shutdown():\n \n \n \n ret, frame = cap.read()\n\n hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n frame = cv2.resize(frame, (0,0),fx=0.5,fy=0.5, interpolation=cv2.INTER_CUBIC)\n hsv_frame = 
cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n \n cnts_yellow = findYellowCnts(hsv_frame)\n cnts_red = findRedCnts(hsv_frame)\n delta_head = 0\n delta_lr = 0\n bar_slope = 0\n if (cnts_yellow is not None and cnts_red is not None):\n cx_y, cy_y = findCentroid(cnts_yellow)\n cx_r, cy_r = findCentroid(cnts_red)\n\n delta_lr = focusCenter(hsv_frame, cx_y, cx_r)\n #print('delta_lr: ' + str(delta_lr))\n delta_head = headTilt(hsv_frame, cy_y, cy_r)\n bar_slope = slope(cx_y, cy_y, cx_r, cy_r)\n\n cv2.drawContours(hsv_frame, cnts_yellow, -1, (255,0,0), 2)\n cv2.drawContours(hsv_frame, cnts_red, -1, (10,235,290), 2)\n \n cv2.circle(hsv_frame, (int((cx_y + cx_r) / 2), int((cy_y + cy_r) / 2)),5,(130, 40, 255), -1)\n cv2.circle(hsv_frame, (int(frame.shape[1]/2), int(frame.shape[0]/2)),5,(130, 40, 255), -1)\n \n cv2.circle(hsv_frame, (cx_y, cy_y),5,(130, 40, 255), -1)\n cv2.circle(hsv_frame, (cx_r, cy_r),5,(130, 40, 255), -1)\n\n #cv2.imshow('Current view',hsv_frame)\n #cv2.waitKey(33)\n \n if currState == States.INIT:\n init()\n currState = States.GET_READY \n \n elif currState == States.GET_READY:\n print(\"[GET_READY]\")\n if robot.get_pressed_button() == 'start':\n currState = States.FIND_BAR\n #if cv2.waitKey(33) &0xFF == ord('f'):\n # currState = States.FIND_BAR\n\n elif currState == States.FIND_BAR:\n print(\"[FIND_BAR]\")\n robot.walking_params.append(robot.loadWalkingParams('param.yaml')) \n robot.setGeneralControlModule(\"walking_module\")\n robot.walking_params[1].x_move_amplitude = 0.005\n robot.walking_params[1].balance_enable = False\n robot.walking_params[1].y_move_amplitude = 0.003\n #robot.walking_params[1].angle_move_amplitude = 1.75 * DEGREE2RADIAN\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n currState = States.WALK_2_BAR\n \n elif currState == States.WALK_2_BAR:\n print(\"[WALK_2_BAR]\")\n #if(delta_head < -10):\n \n head_tilt_delta = delta_head * 0.01\n current_head_tilt += head_tilt_delta\n current_head_tilt = 
max(current_head_tilt,-1.2)\n print('current head: {}, head_tilt_delta: {}'.format(current_head_tilt,head_tilt_delta))\n \n robot.moveHead(None, current_head_tilt)\n print(\"delta_lr: {}\".format(delta_lr))\n ratio = 1\n angle_delta = delta_lr * ratio\n print(\"*********************************************\")\n \n \n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n '''\n if(delta_lr > 20):\n print(\"GO LEFT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n elif(delta_lr < -20):\n print(\"GO RIGHT\")\n robot.walking_params[1].angle_move_amplitude = angle_delta\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta)\n \n else:\n print(\"GO FORWARD\")\n robot.walking_params[1].angle_move_amplitude = 0\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n print(\"angle_move_amp: \", angle_delta) \n '''\n \n \n if(current_head_tilt == -1.2):\n \n robot.walkStop()\n robot.onlineWalkSetup(x=0.02, z=-0.025, foot_dist=0.08, foot_height=0.05)\n currState = States.WALK_SIDEWAYS\n continue\n \n elif currState == States.WALK_SIDEWAYS:\n ret, frame = cap.read()\n print(\"bar_slope: {}\".format(bar_slope))\n\n bar_x = (cx_y + cx_r) / 2\n bar_y = (cy_y + cy_r) / 2\n print(\"bar_location: ({},{})\".format(bar_x,bar_y))\n x_err = bar_x - hsv_frame.shape[1] / 2\n y_err = bar_y - hsv_frame.shape[0] *2 / 3\n print(\"bar_error: ({},{})\".format(x_err,y_err))\n '''\n if y_err > 20:\n print('back')\n robot.onlineWalkCommand(direction=\"backward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.5)\n rospy.sleep(2)\n ''' \n \n if bar_slope <= -0.07:\n print('turn left')\n robot.onlineWalkCommand(direction=\"turn_left\", start_leg=\"left\", step_num=2,\n front_length=0.0, 
step_angle=10.0,step_time=0.4)\n rospy.sleep(2)\n\n elif bar_slope > 0.07:\n print('turn right')\n\n robot.onlineWalkCommand(direction=\"turn_right\", start_leg=\"right\", step_num=2,\n front_length=0.0, step_angle=10.0,step_time=0.4)\n rospy.sleep(2)\n ''' \n elif x_err > 30:\n print('shift right')\n robot.onlineWalkCommand(direction=\"right\", start_leg=\"right\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif x_err < -30:\n print('shift left')\n robot.onlineWalkCommand(direction=\"left\", start_leg=\"left\", step_num=2,\n side_length=0.01, step_time=0.4)\n rospy.sleep(2.5)\n \n elif y_err < -20:\n print('forward')\n robot.onlineWalkCommand(direction=\"forward\", start_leg=\"right\", step_num=2,\n front_length=0.02, step_time=0.4)\n rospy.sleep(2)\n '''\n else: \n print('success!!!')\n # TODO removed sleep here\n #rospy.sleep(6)\n currState = States.PICK_BAR\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n ret, frame = cap.read()\n\n \n \n \n '''\n print(\"[WALK_SIDEWAYS]\")\n print(\"bar_slope: {}\".format(bar_slope))\n \n if(bar_slope > 0.1):\n print(\"Turn facing right\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = -0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n \n elif(bar_slope < -0.1):\n print(\"Turn facing left\")\n robot.walking_params[1].x_move_amplitude = 0\n robot.walking_params[1].y_move_amplitude = 0.01\n robot.walk_set_param_pub.publish(robot.walking_params[1])\n rospy.sleep(2)\n robot.walkStart()\n rospy.sleep(2)\n robot.walkStop()\n else:\n print(\"Keep facing forward\")\n \n currState = States.PICK_BAR\n '''\n elif currState == 
States.PICK_BAR:\n rospy.loginfo(\"[PICK_BAR]\")\n # TODO testing\n #rospy.sleep(2)\n robot.setGeneralControlModule(\"none\")\n rospy.sleep(2)\n robot.setGeneralControlModule(\"action_module\")\n \n robot.playMotion(86, wait_for_end=True)\n robot.playMotion(87, wait_for_end=True)\n rospy.sleep(1.0)\n robot.moveGripper(left=40.0,right=40.0)\n rospy.sleep(0.5)\n robot.moveGripper(left=20.0,right=20.0) \n rospy.sleep(1.0)\n robot.playMotion(90, wait_for_end=True)\n rospy.sleep(1.0)\n currState = States.WALK_WITH_BAR\n\n elif currState == States.WALK_WITH_BAR:\n print(\"[WALK_WITH_BAR]\")\n \n \n robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))\n #robot.walking_params[2].hip_pitch_offset = -5\n robot.walking_params[2].x_move_amplitude = 0.005\n robot.walking_params[2].y_move_amplitude = 0.000\n #TODO change the a move amplitude to 1\n robot.walking_params[2].angle_move_amplitude = 0 * DEGREE2RADIAN\n robot.walk_set_param_pub.publish(robot.walking_params[2])\n # Set ctrl module to walking, this actually only sets the legs\n robot.setJointsControlModule([\"r_hip_yaw\",\"l_hip_yaw\",\"r_hip_roll\",\"l_hip_roll\",\"r_hip_pitch\",\n \"l_hip_pitch\",\"r_knee\",\"l_knee\",\"r_ank_pitch\",\"l_ank_pitch\",\"r_ank_roll\",\"l_ank_roll\"],\n [\"walking_module\"])\n print(robot.walking_params[2])\n rospy.sleep(3)\n robot.walkStart()\n rospy.sleep(3)\n robot.moveGripper(left=15.0,right=15.0) \n rospy.sleep(9)\n\n robot.walkStop()\n currState = States.LIFT_BAR\n\n elif currState == States.LIFT_BAR:\n print(\"[LIFT_BAR]\")\n robot.setGeneralControlModule(\"none\")\n robot.setGeneralControlModule(\"action_module\")\n robot.playMotion(89, wait_for_end=True)\n robot.setJointsControlModule(['head_pan', 'head_tilt'],['none','none'])\n robot.moveHead(0,1.5)\n currState = States.WALK_2_FINISH\n\n elif currState == States.WALK_2_FINISH:\n print(\"WALK_2_FINISH\")\n \n robot.walking_params.append(robot.loadWalkingParams('pickup_param.yaml'))\n 
robot.walking_params[3].hip_pitch_offset = 1 * DEGREE2RADIAN #1.5\n robot.walking_params[3].x_move_amplitude = 0\n robot.walking_params[3].balance_enable = True\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n \n # Set ctrl module to walking, this actually only sets the legs\n robot.setJointsControlModule([\"r_hip_yaw\",\"l_hip_yaw\",\"r_hip_roll\",\"l_hip_roll\",\"r_hip_pitch\",\n \"l_hip_pitch\",\"r_knee\",\"l_knee\",\"r_ank_pitch\",\"l_ank_pitch\",\"r_ank_roll\",\"l_ank_roll\"],\n [\"walking_module\"])\n rospy.sleep(5)\n robot.walkStart()\n rospy.sleep(3)\n robot.walking_params[3].x_move_amplitude = 0.005\n robot.walk_set_param_pub.publish(robot.walking_params[3])\n rospy.sleep(1117)\n robot.walkStop()\n currState = States.END\n rate.sleep() \n elif currState == States.END:\n print(\"[END]\")\n #robot.walkStop()\n\n \n rate.sleep()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def load_data_from_file(filename):
"""
Load that data, my dude(tte)
:param filename: The file from which you want to load data
:return: Time and position data of the file
"""
time = []
position = []
with open(filename, 'r') as original:
time_position = list(csv.reader(original))
for row in range(1, len(time_position)):
time.append(float(time_position[row][0]))
position.append(float(time_position[row][1]))
return time, position
def greater_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem >= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
<|reserved_special_token_0|>
def ini_max_fin(pos1):
c_initial = pos1[0]
c_max = max(pos1)
c_final = pos1[-1]
return c_initial, c_max, c_final
<|reserved_special_token_0|>
def get_system_params(perc_os, settle_t):
"""
:param perc_os: The Overshoot Percentage value from which to calculate things
:param settle_t: The settling time from which to calculate things
:return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)
"""
num_zet = -math.log(perc_os / 100)
den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)
zeta = num_zet / den_zet
omega = 4 / (zeta * settle_t)
m_spr = 1
k_spr = omega ** 2
c_spr = 2 * zeta * omega
return m_spr, k_spr, c_spr
def analyze_data(filename):
"""
:param filename: A name for the csv file to run the resulting operations
:return: A dictionary with some gucci values
"""
backtime, backpos = load_data_from_file(filename)
c_i, c_m, c_f = ini_max_fin(backpos)
t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)
m, k, c = get_system_params(percos, t_set)
dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,
'settling_time': t_set, 'system_mass': m, 'system_spring': k,
'system_damping': c}
true_dict = {}
for key in sorted(dict_party):
true_dict.update({key: dict_party[key]})
return true_dict
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data_from_file(filename):
"""
Load that data, my dude(tte)
:param filename: The file from which you want to load data
:return: Time and position data of the file
"""
time = []
position = []
with open(filename, 'r') as original:
time_position = list(csv.reader(original))
for row in range(1, len(time_position)):
time.append(float(time_position[row][0]))
position.append(float(time_position[row][1]))
return time, position
def greater_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem >= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
<|reserved_special_token_0|>
def ini_max_fin(pos1):
c_initial = pos1[0]
c_max = max(pos1)
c_final = pos1[-1]
return c_initial, c_max, c_final
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
"""
This function estimates the characteristics of the waveform we're analyzing
:param time_c: A list of time values to determine the time it takes for certain things to occur
:param pos_c: A list of position values to determine the position at certain values of time
:param c_initial: The initial position value of our waveform
:param c_max: The maximum position value of our waveform
:param c_final: The final value of our waveform
:return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).
"""
maxdex = pos_c.index(c_max)
ten_perc = (c_final + c_initial) * 0.1
tr_10 = greater_than_index(pos_c, ten_perc)
ninety_p = (c_final + c_initial) * 0.9
tr_90 = greater_than_index(pos_c, ninety_p)
t_r = time_c[tr_10] - time_c[tr_90]
t_p = time_c[maxdex]
p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100
two_perc = (c_final - c_initial) * 0.02
c_thresh_low = c_final - two_perc
c_thresh_high = c_final + two_perc
mcfly = list(reversed(time_c))
beckett = list(reversed(pos_c))
minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(
beckett, c_thresh_high)]
t_s = mcfly[min(minlist)]
return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
"""
:param perc_os: The Overshoot Percentage value from which to calculate things
:param settle_t: The settling time from which to calculate things
:return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)
"""
num_zet = -math.log(perc_os / 100)
den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)
zeta = num_zet / den_zet
omega = 4 / (zeta * settle_t)
m_spr = 1
k_spr = omega ** 2
c_spr = 2 * zeta * omega
return m_spr, k_spr, c_spr
def analyze_data(filename):
"""
:param filename: A name for the csv file to run the resulting operations
:return: A dictionary with some gucci values
"""
backtime, backpos = load_data_from_file(filename)
c_i, c_m, c_f = ini_max_fin(backpos)
t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)
m, k, c = get_system_params(percos, t_set)
dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,
'settling_time': t_set, 'system_mass': m, 'system_spring': k,
'system_damping': c}
true_dict = {}
for key in sorted(dict_party):
true_dict.update({key: dict_party[key]})
return true_dict
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_data_from_file(filename):
"""
Load that data, my dude(tte)
:param filename: The file from which you want to load data
:return: Time and position data of the file
"""
time = []
position = []
with open(filename, 'r') as original:
time_position = list(csv.reader(original))
for row in range(1, len(time_position)):
time.append(float(time_position[row][0]))
position.append(float(time_position[row][1]))
return time, position
def greater_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem >= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def less_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem <= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def ini_max_fin(pos1):
c_initial = pos1[0]
c_max = max(pos1)
c_final = pos1[-1]
return c_initial, c_max, c_final
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
"""
This function estimates the characteristics of the waveform we're analyzing
:param time_c: A list of time values to determine the time it takes for certain things to occur
:param pos_c: A list of position values to determine the position at certain values of time
:param c_initial: The initial position value of our waveform
:param c_max: The maximum position value of our waveform
:param c_final: The final value of our waveform
:return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).
"""
maxdex = pos_c.index(c_max)
ten_perc = (c_final + c_initial) * 0.1
tr_10 = greater_than_index(pos_c, ten_perc)
ninety_p = (c_final + c_initial) * 0.9
tr_90 = greater_than_index(pos_c, ninety_p)
t_r = time_c[tr_10] - time_c[tr_90]
t_p = time_c[maxdex]
p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100
two_perc = (c_final - c_initial) * 0.02
c_thresh_low = c_final - two_perc
c_thresh_high = c_final + two_perc
mcfly = list(reversed(time_c))
beckett = list(reversed(pos_c))
minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(
beckett, c_thresh_high)]
t_s = mcfly[min(minlist)]
return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
"""
:param perc_os: The Overshoot Percentage value from which to calculate things
:param settle_t: The settling time from which to calculate things
:return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)
"""
num_zet = -math.log(perc_os / 100)
den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)
zeta = num_zet / den_zet
omega = 4 / (zeta * settle_t)
m_spr = 1
k_spr = omega ** 2
c_spr = 2 * zeta * omega
return m_spr, k_spr, c_spr
def analyze_data(filename):
"""
:param filename: A name for the csv file to run the resulting operations
:return: A dictionary with some gucci values
"""
backtime, backpos = load_data_from_file(filename)
c_i, c_m, c_f = ini_max_fin(backpos)
t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)
m, k, c = get_system_params(percos, t_set)
dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,
'settling_time': t_set, 'system_mass': m, 'system_spring': k,
'system_damping': c}
true_dict = {}
for key in sorted(dict_party):
true_dict.update({key: dict_party[key]})
return true_dict
if __name__ == '__main__':
print(analyze_data('data1.csv'))
<|reserved_special_token_1|>
import csv
import math
def load_data_from_file(filename):
"""
Load that data, my dude(tte)
:param filename: The file from which you want to load data
:return: Time and position data of the file
"""
time = []
position = []
with open(filename, 'r') as original:
time_position = list(csv.reader(original))
for row in range(1, len(time_position)):
time.append(float(time_position[row][0]))
position.append(float(time_position[row][1]))
return time, position
def greater_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem >= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def less_than_index(numlist, singnum):
"""
Function takes in a list of ints, compares them to a single int and returns the index value at which the
list encounters a value greater than, or equal to, the value of interest.
:param numlist: The list of ints
:param singnum: The int to compare the list to
:return: The index value of the position >= value of interest
"""
try:
for elem in numlist:
if elem <= singnum:
e_val = numlist.index(elem)
return e_val
except ValueError:
return 'None. Try a value contained within the list.'
def ini_max_fin(pos1):
c_initial = pos1[0]
c_max = max(pos1)
c_final = pos1[-1]
return c_initial, c_max, c_final
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
"""
This function estimates the characteristics of the waveform we're analyzing
:param time_c: A list of time values to determine the time it takes for certain things to occur
:param pos_c: A list of position values to determine the position at certain values of time
:param c_initial: The initial position value of our waveform
:param c_max: The maximum position value of our waveform
:param c_final: The final value of our waveform
:return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).
"""
maxdex = pos_c.index(c_max)
ten_perc = (c_final + c_initial) * 0.1
tr_10 = greater_than_index(pos_c, ten_perc)
ninety_p = (c_final + c_initial) * 0.9
tr_90 = greater_than_index(pos_c, ninety_p)
t_r = time_c[tr_10] - time_c[tr_90]
t_p = time_c[maxdex]
p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100
two_perc = (c_final - c_initial) * 0.02
c_thresh_low = c_final - two_perc
c_thresh_high = c_final + two_perc
mcfly = list(reversed(time_c))
beckett = list(reversed(pos_c))
minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(
beckett, c_thresh_high)]
t_s = mcfly[min(minlist)]
return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
"""
:param perc_os: The Overshoot Percentage value from which to calculate things
:param settle_t: The settling time from which to calculate things
:return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)
"""
num_zet = -math.log(perc_os / 100)
den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)
zeta = num_zet / den_zet
omega = 4 / (zeta * settle_t)
m_spr = 1
k_spr = omega ** 2
c_spr = 2 * zeta * omega
return m_spr, k_spr, c_spr
def analyze_data(filename):
"""
:param filename: A name for the csv file to run the resulting operations
:return: A dictionary with some gucci values
"""
backtime, backpos = load_data_from_file(filename)
c_i, c_m, c_f = ini_max_fin(backpos)
t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)
m, k, c = get_system_params(percos, t_set)
dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,
'settling_time': t_set, 'system_mass': m, 'system_spring': k,
'system_damping': c}
true_dict = {}
for key in sorted(dict_party):
true_dict.update({key: dict_party[key]})
return true_dict
if __name__ == '__main__':
print(analyze_data('data1.csv'))
<|reserved_special_token_1|>
#!/usr/bin/env python3
import csv
import math
def load_data_from_file(filename):
    """Read a two-column CSV of time/position samples.

    The first row is assumed to be a header and is discarded; each
    remaining row contributes one (time, position) pair.

    :param filename: path of the CSV file to read
    :return: tuple of (times, positions) as parallel lists of floats
    """
    times = []
    positions = []
    with open(filename, 'r') as handle:
        rows = csv.reader(handle)
        next(rows, None)  # skip the header row
        for row in rows:
            times.append(float(row[0]))
            positions.append(float(row[1]))
    return times, positions
def greater_than_index(numlist, singnum):
    """Return the index of the first element of *numlist* that is >= *singnum*.

    :param numlist: sequence of numbers to scan
    :param singnum: threshold value to compare against
    :return: index of the first element >= singnum, or None if no element
        qualifies
    """
    # enumerate() yields the position directly. The original re-located the
    # element with list.index() and wrapped the loop in a try/except
    # ValueError whose handler was unreachable (comparisons and a
    # guaranteed-successful index() never raise ValueError); the implicit
    # None fall-through is now explicit.
    for index, value in enumerate(numlist):
        if value >= singnum:
            return index
    return None
def less_than_index(numlist, singnum):
    """Return the index of the first element of *numlist* that is <= *singnum*.

    :param numlist: sequence of numbers to scan
    :param singnum: threshold value to compare against
    :return: index of the first element <= singnum, or None if no element
        qualifies
    """
    # Same cleanup as greater_than_index: enumerate() replaces the redundant
    # list.index() lookup, the unreachable try/except ValueError is removed,
    # and the not-found case returns None explicitly.
    for index, value in enumerate(numlist):
        if value <= singnum:
            return index
    return None
def ini_max_fin(pos1):
    """Return the initial, maximum, and final values of a position trace.

    :param pos1: non-empty list of position samples
    :return: tuple (c_initial, c_max, c_final)
    """
    return pos1[0], max(pos1), pos1[-1]
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
    """Estimate step-response characteristics of a waveform.

    :param time_c: list of time samples
    :param pos_c: list of position samples (parallel to time_c)
    :param c_initial: initial position value of the waveform
    :param c_max: maximum position value of the waveform
    :param c_final: final (steady-state) position value of the waveform
    :return: tuple (t_r, t_p, p_os_fix, t_s) -- rise time, peak time,
        percent overshoot, settling time
    """

    def _first_at_or_above(values, threshold):
        # Index of the first sample >= threshold, or None if there is none.
        return next((i for i, v in enumerate(values) if v >= threshold), None)

    def _first_at_or_below(values, threshold):
        # Index of the first sample <= threshold, or None if there is none.
        return next((i for i, v in enumerate(values) if v <= threshold), None)

    step = c_final - c_initial

    # Rise time: time to go from 10% to 90% of the step. Thresholds are
    # measured relative to c_initial; the original used
    # (c_final + c_initial) * frac, which is only correct when c_initial == 0.
    idx_10 = _first_at_or_above(pos_c, c_initial + 0.1 * step)
    idx_90 = _first_at_or_above(pos_c, c_initial + 0.9 * step)
    # BUG FIX: the original computed time[10%] - time[90%], which is always
    # negative; rise time is t(90%) - t(10%).
    t_r = time_c[idx_90] - time_c[idx_10]

    # Peak time: time of the first occurrence of the maximum value.
    t_p = time_c[pos_c.index(c_max)]

    # Percent overshoot relative to the step size.
    p_os_fix = (c_max - c_final) / step * 100

    # Settling time: time of the last sample outside the +/-2% band around
    # c_final, found by scanning the trace back-to-front.
    two_perc = step * 0.02
    rev_time = list(reversed(time_c))
    rev_pos = list(reversed(pos_c))
    crossings = [_first_at_or_below(rev_pos, c_final - two_perc),
                 _first_at_or_above(rev_pos, c_final + two_perc)]
    crossings = [c for c in crossings if c is not None]
    if crossings:
        t_s = rev_time[min(crossings)]
    else:
        # The trace never leaves the band, i.e. it is settled from the start.
        # (The original crashed here: min() over a list containing None.)
        t_s = time_c[0]
    return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
    """Back out second-order system constants from response characteristics.

    Uses the standard underdamped second-order relations: damping ratio
    from percent overshoot, natural frequency from the 2% settling time
    (t_s ~= 4 / (zeta * omega_n)), with unit mass assumed.

    :param perc_os: percent overshoot of the response
    :param settle_t: settling time of the response
    :return: tuple (m_spr, k_spr, c_spr) -- mass, spring constant and
        damping constant
    """
    log_os = math.log(perc_os / 100)
    zeta = -log_os / math.sqrt(math.pi ** 2 + log_os ** 2)
    omega_n = 4 / (zeta * settle_t)
    # With mass fixed at 1 unit: k = omega_n^2 and c = 2 * zeta * omega_n.
    return 1, omega_n ** 2, 2 * zeta * omega_n
def analyze_data(filename):
    """Run the full analysis pipeline on one CSV of time/position data.

    :param filename: path of the csv file to analyze
    :return: dict of waveform characteristics and fitted system constants,
        with keys in sorted order
    """
    times, positions = load_data_from_file(filename)
    c_i, c_m, c_f = ini_max_fin(positions)
    t_rise, t_peak, percos, t_set = char_ests(times, positions, c_i, c_m, c_f)
    mass, spring, damping = get_system_params(percos, t_set)
    results = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
               'rise_time': t_rise, 'peak_time': t_peak,
               'perc_overshoot': percos, 'settling_time': t_set,
               'system_mass': mass, 'system_spring': spring,
               'system_damping': damping}
    # Rebuild with keys in sorted order (dicts preserve insertion order).
    return {key: results[key] for key in sorted(results)}
if __name__ == '__main__':
    # Script entry point: analyze one dataset and print its summary dict.
    # The remaining datasets can be run by uncommenting the lines below.
    print(analyze_data('data1.csv'))
    # print(analyze_data('data2.csv'))
    # print(analyze_data('data3.csv'))
    # print(analyze_data('data4.csv'))
|
flexible
|
{
"blob_id": "4545ce36c4d3df50e263d3323c04c53acb2b50e0",
"index": 7888,
"step-1": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. 
Try a value contained within the list.'\n\n\n<mask token>\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\n<mask token>\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. 
Try a value contained within the list.'\n\n\n<mask token>\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, 
c_spr\n\n\ndef analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef less_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem <= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. 
Try a value contained within the list.'\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, c_spr\n\n\ndef 
analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\nif __name__ == '__main__':\n print(analyze_data('data1.csv'))\n",
"step-4": "import csv\nimport math\n\n\ndef load_data_from_file(filename):\n \"\"\"\n Load that data, my dude(tte)\n :param filename: The file from which you want to load data\n :return: Time and position data of the file\n \"\"\"\n time = []\n position = []\n with open(filename, 'r') as original:\n time_position = list(csv.reader(original))\n for row in range(1, len(time_position)):\n time.append(float(time_position[row][0]))\n position.append(float(time_position[row][1]))\n return time, position\n\n\ndef greater_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem >= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. Try a value contained within the list.'\n\n\ndef less_than_index(numlist, singnum):\n \"\"\"\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\n list encounters a value greater than, or equal to, the value of interest.\n :param numlist: The list of ints\n :param singnum: The int to compare the list to\n :return: The index value of the position >= value of interest\n \"\"\"\n try:\n for elem in numlist:\n if elem <= singnum:\n e_val = numlist.index(elem)\n return e_val\n except ValueError:\n return 'None. 
Try a value contained within the list.'\n\n\ndef ini_max_fin(pos1):\n c_initial = pos1[0]\n c_max = max(pos1)\n c_final = pos1[-1]\n return c_initial, c_max, c_final\n\n\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\n \"\"\"\n This function estimates the characteristics of the waveform we're analyzing\n :param time_c: A list of time values to determine the time it takes for certain things to occur\n :param pos_c: A list of position values to determine the position at certain values of time\n :param c_initial: The initial position value of our waveform\n :param c_max: The maximum position value of our waveform\n :param c_final: The final value of our waveform\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\n \"\"\"\n maxdex = pos_c.index(c_max)\n ten_perc = (c_final + c_initial) * 0.1\n tr_10 = greater_than_index(pos_c, ten_perc)\n ninety_p = (c_final + c_initial) * 0.9\n tr_90 = greater_than_index(pos_c, ninety_p)\n t_r = time_c[tr_10] - time_c[tr_90]\n t_p = time_c[maxdex]\n p_os_fix = (c_max - c_final) / (c_final - c_initial) * 100\n two_perc = (c_final - c_initial) * 0.02\n c_thresh_low = c_final - two_perc\n c_thresh_high = c_final + two_perc\n mcfly = list(reversed(time_c))\n beckett = list(reversed(pos_c))\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(\n beckett, c_thresh_high)]\n t_s = mcfly[min(minlist)]\n return t_r, t_p, p_os_fix, t_s\n\n\ndef get_system_params(perc_os, settle_t):\n \"\"\"\n :param perc_os: The Overshoot Percentage value from which to calculate things \n :param settle_t: The settling time from which to calculate things\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\n \"\"\"\n num_zet = -math.log(perc_os / 100)\n den_zet = math.sqrt(math.pi ** 2 + math.log(perc_os / 100) ** 2)\n zeta = num_zet / den_zet\n omega = 4 / (zeta * settle_t)\n m_spr = 1\n k_spr = omega ** 2\n c_spr = 2 * zeta * omega\n return m_spr, k_spr, c_spr\n\n\ndef 
analyze_data(filename):\n \"\"\"\n :param filename: A name for the csv file to run the resulting operations \n :return: A dictionary with some gucci values\n \"\"\"\n backtime, backpos = load_data_from_file(filename)\n c_i, c_m, c_f = ini_max_fin(backpos)\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\n m, k, c = get_system_params(percos, t_set)\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,\n 'rise_time': t_rise, 'peak_time': t_peak, 'perc_overshoot': percos,\n 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\n 'system_damping': c}\n true_dict = {}\n for key in sorted(dict_party):\n true_dict.update({key: dict_party[key]})\n return true_dict\n\n\nif __name__ == '__main__':\n print(analyze_data('data1.csv'))\n",
"step-5": "#!/usr/bin/env python3\r\n\r\nimport csv\r\nimport math\r\n\r\n\r\ndef load_data_from_file(filename):\r\n \"\"\"\r\n Load that data, my dude(tte)\r\n :param filename: The file from which you want to load data\r\n :return: Time and position data of the file\r\n \"\"\"\r\n time = []\r\n position = []\r\n with open(filename, 'r') as original:\r\n time_position = list(csv.reader(original)) # list()\r\n for row in range(1, len(time_position)):\r\n time.append(float(time_position[row][0]))\r\n position.append(float(time_position[row][1]))\r\n\r\n return time, position\r\n\r\n\r\ndef greater_than_index(numlist, singnum):\r\n \"\"\"\r\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\r\n list encounters a value greater than, or equal to, the value of interest.\r\n :param numlist: The list of ints\r\n :param singnum: The int to compare the list to\r\n :return: The index value of the position >= value of interest\r\n \"\"\"\r\n try:\r\n for elem in numlist:\r\n if elem >= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. Try a value contained within the list.'\r\n\r\n\r\ndef less_than_index(numlist, singnum):\r\n \"\"\"\r\n Function takes in a list of ints, compares them to a single int and returns the index value at which the\r\n list encounters a value greater than, or equal to, the value of interest.\r\n :param numlist: The list of ints\r\n :param singnum: The int to compare the list to\r\n :return: The index value of the position >= value of interest\r\n \"\"\"\r\n try:\r\n for elem in numlist:\r\n if elem <= singnum:\r\n e_val = numlist.index(elem)\r\n return e_val\r\n except ValueError:\r\n return 'None. 
Try a value contained within the list.'\r\n\r\n\r\ndef ini_max_fin(pos1):\r\n c_initial = pos1[0]\r\n c_max = max(pos1)\r\n c_final = pos1[-1]\r\n return c_initial, c_max, c_final\r\n\r\n\r\ndef char_ests(time_c, pos_c, c_initial, c_max, c_final):\r\n \"\"\"\r\n This function estimates the characteristics of the waveform we're analyzing\r\n :param time_c: A list of time values to determine the time it takes for certain things to occur\r\n :param pos_c: A list of position values to determine the position at certain values of time\r\n :param c_initial: The initial position value of our waveform\r\n :param c_max: The maximum position value of our waveform\r\n :param c_final: The final value of our waveform\r\n :return: Rise time (t_r), Peak time(t_p), % Overshoot(p_os_fix), Settling time (t_s).\r\n \"\"\"\r\n # Index values for time statements\r\n maxdex = pos_c.index(c_max)\r\n ten_perc = (c_final + c_initial) * 0.1\r\n tr_10 = greater_than_index(pos_c, ten_perc)\r\n ninety_p = (c_final + c_initial) * 0.9\r\n tr_90 = greater_than_index(pos_c, ninety_p)\r\n\r\n # Calculations\r\n t_r = time_c[tr_10] - time_c[tr_90] # Rise time\r\n t_p = time_c[maxdex] # Peak time\r\n\r\n # Adjusted %OS eq\r\n p_os_fix = ((c_max - c_final) / (c_final-c_initial)) * 100 # %OS\r\n\r\n # two percent calcs\r\n two_perc = (c_final - c_initial) * 0.02\r\n c_thresh_low = c_final - two_perc\r\n c_thresh_high = c_final + two_perc\r\n mcfly = list(reversed(time_c))\r\n beckett = list(reversed(pos_c))\r\n minlist = [less_than_index(beckett, c_thresh_low), greater_than_index(beckett, c_thresh_high)]\r\n\r\n t_s = mcfly[min(minlist)] # Settling time\r\n\r\n return t_r, t_p, p_os_fix, t_s\r\n\r\n\r\ndef get_system_params(perc_os, settle_t):\r\n \"\"\"\r\n :param perc_os: The Overshoot Percentage value from which to calculate things \r\n :param settle_t: The settling time from which to calculate things\r\n :return: The mass (m_spr), spring (k_spr), and damping constants(c_spr)\r\n \"\"\"\r\n\r\n 
num_zet = -math.log(perc_os/100)\r\n den_zet = math.sqrt(math.pi**2 + math.log(perc_os/100)**2)\r\n zeta = num_zet/den_zet\r\n omega = 4 / (zeta*settle_t)\r\n m_spr = 1 # Told to assume mass is always 1 (unit)\r\n k_spr = omega**2\r\n c_spr = 2*zeta*omega\r\n return m_spr, k_spr, c_spr\r\n\r\n\r\ndef analyze_data(filename):\r\n \"\"\"\r\n :param filename: A name for the csv file to run the resulting operations \r\n :return: A dictionary with some gucci values\r\n \"\"\"\r\n backtime, backpos = load_data_from_file(filename)\r\n c_i, c_m, c_f = ini_max_fin(backpos)\r\n t_rise, t_peak, percos, t_set = char_ests(backtime, backpos, c_i, c_m, c_f)\r\n m, k, c = get_system_params(percos, t_set)\r\n\r\n dict_party = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f, 'rise_time': t_rise, 'peak_time': t_peak,\r\n 'perc_overshoot': percos, 'settling_time': t_set, 'system_mass': m, 'system_spring': k,\r\n 'system_damping': c}\r\n true_dict = {}\r\n for key in sorted(dict_party):\r\n true_dict.update({key: dict_party[key]})\r\n\r\n return true_dict\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n print(analyze_data('data1.csv'))\r\n # print(analyze_data('data2.csv'))\r\n # print(analyze_data('data3.csv'))\r\n # print(analyze_data('data4.csv'))\r\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
"""Test cases for the __main__ module."""
import pytest
from click.testing import CliRunner
from skimpy import __main__
from skimpy import generate_test_data
from skimpy import skim
@pytest.fixture
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) -> None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv("test_file.csv", index=False)
result = runner.invoke(__main__.main, ["test_file.csv"])
assert result.exit_code == 0
def test_000_basic_functionality() -> None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() -> None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime="chartreuse1")
def test_002_header_style() -> None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style="italic green")
def test_003_not_enough_datetimes() -> None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() -> None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = "Named dataframe"
skim(df)
|
normal
|
{
"blob_id": "97a51d959ad642467c508cedc8786f636e4050bb",
"index": 1333,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\n<mask token>\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\n<mask token>\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\ndef test_003_not_enough_datetimes() ->None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-4": "<mask token>\nimport pytest\nfrom click.testing import CliRunner\nfrom skimpy import __main__\nfrom skimpy import generate_test_data\nfrom skimpy import skim\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\ndef test_003_not_enough_datetimes() ->None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-5": "\"\"\"Test cases for the __main__ module.\"\"\"\nimport pytest\nfrom click.testing import CliRunner\n\nfrom skimpy import __main__\nfrom skimpy import generate_test_data\nfrom skimpy import skim\n\n\n@pytest.fixture\ndef runner() -> CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) -> None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv(\"test_file.csv\", index=False)\n result = runner.invoke(__main__.main, [\"test_file.csv\"])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() -> None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() -> None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime=\"chartreuse1\")\n\n\ndef test_002_header_style() -> None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style=\"italic green\")\n\n\ndef test_003_not_enough_datetimes() -> None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() -> None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = \"Named dataframe\"\n skim(df)\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
'''
filter_items = lambda a : a[0] == 'b'
fruits = ["apple", "banana", "pear", "orange"]
result = filter(filter_items, fruits)
print(list(result))
'''
'''
Given a list of integers, return the even integers in the list.
input = [11, 4, 5, 8, 9, 2, 12]
output = [4, 8, 2, 12]
input = [3, 5, 7]
output = []
'''
# even_integers = lambda a : a / 2 == 0
even_integers = lambda a : a % 2 == 0
input = [11, 4, 5, 8, 9, 2, 12]
result = filter(even_integers, input)
print(list(result))
input = [3, 5, 7]
result = filter(even_integers, input)
print(list(result))
|
normal
|
{
"blob_id": "7d9032b2426dbf3c285b99efa78be38d8f76ec24",
"index": 1933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(list(result))\n<mask token>\nprint(list(result))\n",
"step-3": "<mask token>\neven_integers = lambda a: a % 2 == 0\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))\n",
"step-4": "'''\nfilter_items = lambda a : a[0] == 'b'\n\nfruits = [\"apple\", \"banana\", \"pear\", \"orange\"]\nresult = filter(filter_items, fruits)\nprint(list(result))\n'''\n\n'''\nGiven a list of integers, return the even integers in the list.\n\ninput = [11, 4, 5, 8, 9, 2, 12]\noutput = [4, 8, 2, 12]\n\ninput = [3, 5, 7]\noutput = []\n'''\n\n# even_integers = lambda a : a / 2 == 0\neven_integers = lambda a : a % 2 == 0\n\ninput = [11, 4, 5, 8, 9, 2, 12]\nresult = filter(even_integers, input)\nprint(list(result))\n\ninput = [3, 5, 7]\nresult = filter(even_integers, input)\nprint(list(result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# MIT License
#
# Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from layers import *
from utils import *
from collections import namedtuple
trinet_parameters = namedtuple('parameters',
'encoder, '
'height, width, '
'batch_size, '
'num_threads, '
'num_epochs, '
'alpha_image_loss, '
'disp_gradient_loss_weight, '
'lr_loss_weight, '
'full_summary')
class trinet(object):
def __init__(self,params, mode, left, central, right, reuse_variables=None, model_index=0, net='vgg'):
self.params = params
self.mode = mode
self.model_collection = ['model_0']
self.left = left
self.right = right
self.central = central
self.reuse_variables = reuse_variables
self.model_index = model_index
self.build_model(net)
self.build_outputs()
if self.mode == 'test':
return
self.build_losses()
self.build_summaries()
def gradient_x(self, img):
gx = img[:,:,:-1,:] - img[:,:,1:,:]
return gx
def gradient_y(self, img):
gy = img[:,:-1,:,:] - img[:,1:,:,:]
return gy
def scale_pyramid(self, img, num_scales):
scaled_imgs = [img]
s = tf.shape(img)
h = s[1]
w = s[2]
for i in range(num_scales - 1):
ratio = 2 ** (i + 1)
nh = h // ratio
nw = w // ratio
scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))
return scaled_imgs
def generate_image_left(self, img, disp):
return bilinear_sampler_1d_h(img, -disp)
def generate_image_right(self, img, disp):
return bilinear_sampler_1d_h(img, disp)
def SSIM(self, x, y):
C1 = 0.01 ** 2
C2 = 0.03 ** 2
mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')
mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')
sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2
sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2
sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y
SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
SSIM = SSIM_n / SSIM_d
return tf.clip_by_value((1 - SSIM) / 2, 0, 1)
def get_disparity_smoothness(self, disp, pyramid):
disp_gradients_x = [self.gradient_x(d) for d in disp]
disp_gradients_y = [self.gradient_y(d) for d in disp]
image_gradients_x = [self.gradient_x(img) for img in pyramid]
image_gradients_y = [self.gradient_y(img) for img in pyramid]
weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x]
weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y]
smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(4)]
smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(4)]
return smoothness_x + smoothness_y
# Build model
def build_model(self,net):
with tf.variable_scope('model', reuse=self.reuse_variables) as scope:
self.left_pyramid = self.scale_pyramid(self.left, 4)
# if self.mode == 'train':
self.right_pyramid = self.scale_pyramid(self.right, 4)
self.central_pyramid = self.scale_pyramid(self.central, 4)
with tf.variable_scope('shared-encoder'):
features_cr = self.build_encoder(self.central,model_name=net)
features_cl = features_cr
with tf.variable_scope('encoder-C2R'):
self.disp_c2r = self.build_decoder(features_cr,model_name=net)
with tf.variable_scope('encoder-C2L'):
self.disp_c2l = self.build_decoder(features_cl,model_name=net)
# Build shared encoder
def build_encoder(self, model_input, model_name='vgg'):
with tf.variable_scope('encoder'):
if model_name == 'vgg':
conv1 = conv_block(model_input, 32, 7) # H/2
conv2 = conv_block(conv1, 64, 5) # H/4
conv3 = conv_block(conv2, 128, 3) # H/8
conv4 = conv_block(conv3, 256, 3) # H/16
conv5 = conv_block(conv4, 512, 3) # H/32
conv6 = conv_block(conv5, 512, 3) # H/64
conv7 = conv_block(conv6, 512, 3) # H/128
return conv7, conv1, conv2, conv3, conv4, conv5, conv6
elif model_name == 'resnet50':
conv1 = conv(model_input, 64, 7, 2) # H/2 - 64D
pool1 = maxpool(conv1, 3) # H/4 - 64D
conv2 = resblock(pool1, 64, 3) # H/8 - 256D
conv3 = resblock(conv2, 128, 4) # H/16 - 512D
conv4 = resblock(conv3, 256, 6) # H/32 - 1024D
conv5 = resblock(conv4, 512, 3) # H/64 - 2048D
return conv5, conv1, pool1, conv2, conv3, conv4
def build_decoder(self, skip, model_name='vgg'):
with tf.variable_scope('decoder'):
if model_name == 'vgg':
upconv7 = upconv(skip[0], 512, 3, 2) #H/64
concat7 = tf.concat([upconv7, skip[6]], 3)
iconv7 = conv(concat7, 512, 3, 1)
upconv6 = upconv(iconv7, 512, 3, 2) #H/32
concat6 = tf.concat([upconv6, skip[5]], 3)
iconv6 = conv(concat6, 512, 3, 1)
upconv5 = upconv(iconv6, 256, 3, 2) #H/16
concat5 = tf.concat([upconv5, skip[4]], 3)
iconv5 = conv(concat5, 256, 3, 1)
upconv4 = upconv(iconv5, 128, 3, 2) #H/8
concat4 = tf.concat([upconv4, skip[3]], 3)
iconv4 = conv(concat4, 128, 3, 1)
disp4 = get_disp(iconv4)
udisp4 = upsample_nn(disp4, 2)
upconv3 = upconv(iconv4, 64, 3, 2) #H/4
concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
iconv3 = conv(concat3, 64, 3, 1)
disp3 = get_disp(iconv3)
udisp3 = upsample_nn(disp3, 2)
upconv2 = upconv(iconv3, 32, 3, 2) #H/2
concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
iconv2 = conv(concat2, 32, 3, 1)
disp2 = get_disp(iconv2)
udisp2 = upsample_nn(disp2, 2)
upconv1 = upconv(iconv2, 16, 3, 2) #H
concat1 = tf.concat([upconv1, udisp2], 3)
iconv1 = conv(concat1, 16, 3, 1)
disp1 = get_disp(iconv1)
elif model_name == 'resnet50':
upconv6 = upconv(skip[0], 512, 3, 2) #H/32
concat6 = tf.concat([upconv6, skip[5]], 3)
iconv6 = conv(concat6, 512, 3, 1)
upconv5 = upconv(iconv6, 256, 3, 2) #H/16
concat5 = tf.concat([upconv5, skip[4]], 3)
iconv5 = conv(concat5, 256, 3, 1)
upconv4 = upconv(iconv5, 128, 3, 2) #H/8
concat4 = tf.concat([upconv4, skip[3]], 3)
iconv4 = conv(concat4, 128, 3, 1)
disp4 = get_disp(iconv4)
udisp4 = upsample_nn(disp4, 2)
upconv3 = upconv(iconv4, 64, 3, 2) #H/4
concat3 = tf.concat([upconv3, skip[2], udisp4], 3)
iconv3 = conv(concat3, 64, 3, 1)
disp3 = get_disp(iconv3)
udisp3 = upsample_nn(disp3, 2)
upconv2 = upconv(iconv3, 32, 3, 2) #H/2
concat2 = tf.concat([upconv2, skip[1], udisp3], 3)
iconv2 = conv(concat2, 32, 3, 1)
disp2 = get_disp(iconv2)
udisp2 = upsample_nn(disp2, 2)
upconv1 = upconv(iconv2, 16, 3, 2) #H
concat1 = tf.concat([upconv1, udisp2], 3)
iconv1 = conv(concat1, 16, 3, 1)
disp1 = get_disp(iconv1)
return disp1, disp2, disp3, disp4
def build_outputs(self):
#self.disparity_cr = self.disp_cr[0][0,:,:,0]
#self.disparity_cl = self.disp_cl[0][0,:,:,0]
#self.warp_left = generate_image_left(self.placeholders['im0'], self.disparity_cl)[0]
#self.warp_right = generate_image_right(self.placeholders['im0'], self.disparity_cr)[0]
# STORE DISPARITIES
with tf.variable_scope('disparities'):
self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]
self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]
self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]
self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]
# GENERATE IMAGES
with tf.variable_scope('images'):
self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]
self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]
self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]
self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]
# LR CONSISTENCY
with tf.variable_scope('left-right'):
self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]
self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]
self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]
self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]
# DISPARITY SMOOTHNESS
with tf.variable_scope('smoothness'):
self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)
self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)
self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)
self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)
    def build_losses(self):
        """Assemble the training losses from the tensors built in build_outputs.

        Combines, over 4 pyramid scales and 4 view pairs (left, cl, cr, right):
        an SSIM+L1 image-reconstruction term, an edge-aware disparity
        smoothness term, a left-right consistency term, and a consistency
        term between the two central disparities.  Defines `total_loss`
        plus per-side totals `total_loss_L` / `total_loss_R`.
        """
        with tf.variable_scope('losses', reuse=self.reuse_variables):
            # IMAGE RECONSTRUCTION
            # L1: photometric error between each warped estimate and the
            # pyramid image it reconstructs (kept as maps for summaries,
            # reduced to scalars for the loss).
            self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]
            self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]
            self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]
            self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]
            self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]

            # SSIM: structural dissimilarity for the same four pairs.
            self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]
            self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]
            self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]
            self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]
            self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]
            self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]
            self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]

            # WEIGTHED SUM: blend SSIM and L1 with alpha_image_loss, then
            # sum over scales and pairs (also split into L/R halves).
            self.image_loss_right = [self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i] for i in range(4)]
            self.image_loss_left = [self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i] for i in range(4)]
            self.image_loss_cl = [self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i] for i in range(4)]
            self.image_loss_cr = [self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i] for i in range(4)]
            self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)
            self.image_loss_L = tf.add_n(self.image_loss_left + self.image_loss_cl)
            self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)

            # DISPARITY SMOOTHNESS: per-scale mean |gradient|, attenuated by
            # 2**i so coarser scales contribute less.
            self.disp_lc_loss = [tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_cl_loss = [tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_rc_loss = [tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_cr_loss = [tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i for i in range(4)]
            self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)
            self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)
            self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)

            # LR CONSISTENCY: each disparity vs. its counterpart warped from
            # the opposite view (tensors built in build_outputs).
            self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]
            self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]
            self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]
            self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]
            self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)
            self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)
            self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)

            # CENTRAL DISPARITY CONSISTENCY: the two disparities predicted
            # for the central view (towards left and towards right) should
            # agree per scale.
            self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]
            self.central_disparity_loss = tf.add_n(self.central_disparity_dif)

            # TOTAL LOSS: weighted combination; note the central-disparity
            # term enters only the full loss, not the per-side totals.
            self.total_loss = self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss
            self.total_loss_L = self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + self.params.lr_loss_weight * self.lr_loss_L
            self.total_loss_R = self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R
def build_summaries(self):
# SUMMARIES
with tf.device('/cpu:0'):
for i in range(4):
tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)
tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)
tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)
tf.summary.scalar('total_loss_L', self.total_loss_L, collections= self.model_collection)
tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)
tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)
tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('left_pyramid_' + str(i), self.left_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
if self.params.full_summary:
#tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_left_' + str(i), self.ssim_left[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_right_' + str(i), self.ssim_right[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_cl_' + str(i), self.ssim_cl[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('ssim_cr_' + str(i), self.ssim_cr[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('l1_left_' + str(i), self.l1_left[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)
#tf.summary.image('l1_cl_' + str(i), self.l1_cl[i], max_outputs=4, collections=self.model_collection)
tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)
if self.params.full_summary:
tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)
tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)
tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection)
|
normal
|
{
"blob_id": "fbd8af4ab3e4ebdcb07509db776d38f9c26fd06a",
"index": 9446,
"step-1": "<mask token>\n\n\nclass trinet(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n SSIM = SSIM_n / SSIM_d\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n <mask token>\n\n def build_model(self, net):\n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n with tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central, model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr, model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = self.build_decoder(features_cl, model_name=net)\n\n def build_encoder(self, model_input, model_name='vgg'):\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7)\n conv2 = conv_block(conv1, 64, 5)\n conv3 = conv_block(conv2, 128, 3)\n conv4 = conv_block(conv3, 256, 3)\n conv5 = conv_block(conv4, 512, 3)\n conv6 = conv_block(conv5, 512, 3)\n conv7 = conv_block(conv6, 512, 3)\n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2)\n pool1 = maxpool(conv1, 
3)\n conv2 = resblock(pool1, 64, 3)\n conv3 = resblock(conv2, 128, 4)\n conv4 = resblock(conv3, 256, 6)\n conv5 = resblock(conv4, 512, 3)\n return conv5, conv1, pool1, conv2, conv3, conv4\n\n def build_decoder(self, skip, model_name='vgg'):\n with tf.variable_scope('decoder'):\n if model_name == 'vgg':\n upconv7 = upconv(skip[0], 512, 3, 2)\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n upconv6 = upconv(iconv7, 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n elif model_name == 'resnet50':\n upconv6 = upconv(skip[0], 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 
= upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n return disp1, disp2, disp3, disp4\n <mask token>\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in\n self.l1_left]\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[\n i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in\n self.l1_right]\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in\n self.l1_cl]\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in\n self.l1_cr]\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid\n [i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n self.ssim_right = [self.SSIM(self.right_est[i], self.\n right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n self.image_loss_right = [(self.params.alpha_image_loss * self.\n ssim_loss_right[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_right[i]) for i in range(4)]\n self.image_loss_left = 
[(self.params.alpha_image_loss * self.\n ssim_loss_left[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_left[i]) for i in range(4)]\n self.image_loss_cl = [(self.params.alpha_image_loss * self.\n ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cl[i]) for i in range(4)]\n self.image_loss_cr = [(self.params.alpha_image_loss * self.\n ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cr[i]) for i in range(4)]\n self.image_loss = tf.add_n(self.image_loss_left + self.\n image_loss_cl + self.image_loss_right + self.image_loss_cr)\n self.image_loss_L = tf.add_n(self.image_loss_left + self.\n image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.\n image_loss_cr)\n self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.\n disp_cr_loss)\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] -\n self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] -\n self.disp_cl[i])) for i in range(4)]\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] -\n self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] -\n self.disp_cr[i])) for i in range(4)]\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss +\n self.lr_rc_loss + 
self.lr_cr_loss)\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.\n disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n self.total_loss = (self.image_loss + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss + self.\n params.lr_loss_weight * self.lr_loss + self.\n central_disparity_loss)\n self.total_loss_L = (self.image_loss_L + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_L + \n self.params.lr_loss_weight * self.lr_loss_L)\n self.total_loss_R = (self.image_loss_R + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_R + \n self.params.lr_loss_weight * self.lr_loss_R)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass trinet(object):\n\n def __init__(self, params, mode, left, central, right, reuse_variables=\n None, model_index=0, net='vgg'):\n self.params = params\n self.mode = mode\n self.model_collection = ['model_0']\n self.left = left\n self.right = right\n self.central = central\n self.reuse_variables = reuse_variables\n self.model_index = model_index\n self.build_model(net)\n self.build_outputs()\n if self.mode == 'test':\n return\n self.build_losses()\n self.build_summaries()\n\n def gradient_x(self, img):\n gx = img[:, :, :-1, :] - img[:, :, 1:, :]\n return gx\n\n def gradient_y(self, img):\n gy = img[:, :-1, :, :] - img[:, 1:, :, :]\n return gy\n\n def scale_pyramid(self, img, num_scales):\n scaled_imgs = [img]\n s = tf.shape(img)\n h = s[1]\n w = s[2]\n for i in range(num_scales - 1):\n ratio = 2 ** (i + 1)\n nh = h // ratio\n nw = w // ratio\n scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))\n return scaled_imgs\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n SSIM = SSIM_n / SSIM_d\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n <mask token>\n\n def build_model(self, net):\n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n with 
tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central, model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr, model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = self.build_decoder(features_cl, model_name=net)\n\n def build_encoder(self, model_input, model_name='vgg'):\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7)\n conv2 = conv_block(conv1, 64, 5)\n conv3 = conv_block(conv2, 128, 3)\n conv4 = conv_block(conv3, 256, 3)\n conv5 = conv_block(conv4, 512, 3)\n conv6 = conv_block(conv5, 512, 3)\n conv7 = conv_block(conv6, 512, 3)\n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2)\n pool1 = maxpool(conv1, 3)\n conv2 = resblock(pool1, 64, 3)\n conv3 = resblock(conv2, 128, 4)\n conv4 = resblock(conv3, 256, 6)\n conv5 = resblock(conv4, 512, 3)\n return conv5, conv1, pool1, conv2, conv3, conv4\n\n def build_decoder(self, skip, model_name='vgg'):\n with tf.variable_scope('decoder'):\n if model_name == 'vgg':\n upconv7 = upconv(skip[0], 512, 3, 2)\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n upconv6 = upconv(iconv7, 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n 
iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n elif model_name == 'resnet50':\n upconv6 = upconv(skip[0], 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n return disp1, disp2, disp3, disp4\n\n def build_outputs(self):\n with tf.variable_scope('disparities'):\n self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.\n disp_c2l]\n self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2l]\n self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.\n disp_c2r]\n self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2r]\n with tf.variable_scope('images'):\n self.left_est = [self.generate_image_left(self.central_pyramid[\n i], self.disp_lc[i]) for i in range(4)]\n self.cl_est = [self.generate_image_right(self.left_pyramid[i],\n self.disp_cl[i]) for i in range(4)]\n self.cr_est = [self.generate_image_left(self.right_pyramid[i],\n self.disp_cr[i]) for i in range(4)]\n self.right_est = 
[self.generate_image_right(self.\n central_pyramid[i], self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('left-right'):\n self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i],\n self.disp_lc[i]) for i in range(4)]\n self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i],\n self.disp_cl[i]) for i in range(4)]\n self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i],\n self.disp_cr[i]) for i in range(4)]\n self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i],\n self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('smoothness'):\n self.disp_lc_smoothness = self.get_disparity_smoothness(self.\n disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.\n disp_cl, self.central_pyramid)\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.\n disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.\n disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in\n self.l1_left]\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[\n i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in\n self.l1_right]\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in\n self.l1_cl]\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in\n self.l1_cr]\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid\n [i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n self.ssim_right = [self.SSIM(self.right_est[i], self.\n right_pyramid[i]) for i in range(4)]\n 
self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n self.image_loss_right = [(self.params.alpha_image_loss * self.\n ssim_loss_right[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_right[i]) for i in range(4)]\n self.image_loss_left = [(self.params.alpha_image_loss * self.\n ssim_loss_left[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_left[i]) for i in range(4)]\n self.image_loss_cl = [(self.params.alpha_image_loss * self.\n ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cl[i]) for i in range(4)]\n self.image_loss_cr = [(self.params.alpha_image_loss * self.\n ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cr[i]) for i in range(4)]\n self.image_loss = tf.add_n(self.image_loss_left + self.\n image_loss_cl + self.image_loss_right + self.image_loss_cr)\n self.image_loss_L = tf.add_n(self.image_loss_left + self.\n image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.\n image_loss_cr)\n self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss)\n 
self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.\n disp_cr_loss)\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] -\n self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] -\n self.disp_cl[i])) for i in range(4)]\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] -\n self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] -\n self.disp_cr[i])) for i in range(4)]\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss +\n self.lr_rc_loss + self.lr_cr_loss)\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.\n disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n self.total_loss = (self.image_loss + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss + self.\n params.lr_loss_weight * self.lr_loss + self.\n central_disparity_loss)\n self.total_loss_L = (self.image_loss_L + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_L + \n self.params.lr_loss_weight * self.lr_loss_L)\n self.total_loss_R = (self.image_loss_R + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_R + \n self.params.lr_loss_weight * self.lr_loss_R)\n\n def build_summaries(self):\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.\n ssim_loss_left[i] + self.ssim_loss_cl[i] + self.\n ssim_loss_right[i] + self.ssim_loss_cr[i], collections=\n self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.\n l1_reconstruction_loss_left[i] + self.\n l1_reconstruction_loss_cl[i] + self.\n l1_reconstruction_loss_right[i] + self.\n l1_reconstruction_loss_cr[i], collections=self.\n model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.\n image_loss_left[i] + 
self.image_loss_cl[i] + self.\n image_loss_right[i] + self.image_loss_cr[i],\n collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.\n disp_lc_loss[i] + self.disp_cl_loss[i] + self.\n disp_rc_loss[i] + self.disp_cr_loss[i], collections=\n self.model_collection)\n tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] +\n self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.\n lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L,\n collections=self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R,\n collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.\n central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i\n ], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), self.\n left_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.\n central_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.\n right_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i],\n max_outputs=4, 
collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('l1_right_' + str(i), self.l1_right[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_cr_' + str(i), self.l1_cr[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4,\n collections=self.model_collection)\n",
"step-3": "<mask token>\n\n\nclass trinet(object):\n\n def __init__(self, params, mode, left, central, right, reuse_variables=\n None, model_index=0, net='vgg'):\n self.params = params\n self.mode = mode\n self.model_collection = ['model_0']\n self.left = left\n self.right = right\n self.central = central\n self.reuse_variables = reuse_variables\n self.model_index = model_index\n self.build_model(net)\n self.build_outputs()\n if self.mode == 'test':\n return\n self.build_losses()\n self.build_summaries()\n\n def gradient_x(self, img):\n gx = img[:, :, :-1, :] - img[:, :, 1:, :]\n return gx\n\n def gradient_y(self, img):\n gy = img[:, :-1, :, :] - img[:, 1:, :, :]\n return gy\n\n def scale_pyramid(self, img, num_scales):\n scaled_imgs = [img]\n s = tf.shape(img)\n h = s[1]\n w = s[2]\n for i in range(num_scales - 1):\n ratio = 2 ** (i + 1)\n nh = h // ratio\n nw = w // ratio\n scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))\n return scaled_imgs\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n SSIM = SSIM_n / SSIM_d\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n\n def get_disparity_smoothness(self, disp, pyramid):\n disp_gradients_x = [self.gradient_x(d) for d in disp]\n disp_gradients_y = [self.gradient_y(d) for d in disp]\n image_gradients_x = [self.gradient_x(img) for img in pyramid]\n image_gradients_y = [self.gradient_y(img) for img in pyramid]\n weights_x = 
[tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for\n g in image_gradients_x]\n weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for\n g in image_gradients_y]\n smoothness_x = [(disp_gradients_x[i] * weights_x[i]) for i in range(4)]\n smoothness_y = [(disp_gradients_y[i] * weights_y[i]) for i in range(4)]\n return smoothness_x + smoothness_y\n\n def build_model(self, net):\n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n with tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central, model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr, model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = self.build_decoder(features_cl, model_name=net)\n\n def build_encoder(self, model_input, model_name='vgg'):\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7)\n conv2 = conv_block(conv1, 64, 5)\n conv3 = conv_block(conv2, 128, 3)\n conv4 = conv_block(conv3, 256, 3)\n conv5 = conv_block(conv4, 512, 3)\n conv6 = conv_block(conv5, 512, 3)\n conv7 = conv_block(conv6, 512, 3)\n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2)\n pool1 = maxpool(conv1, 3)\n conv2 = resblock(pool1, 64, 3)\n conv3 = resblock(conv2, 128, 4)\n conv4 = resblock(conv3, 256, 6)\n conv5 = resblock(conv4, 512, 3)\n return conv5, conv1, pool1, conv2, conv3, conv4\n\n def build_decoder(self, skip, model_name='vgg'):\n with tf.variable_scope('decoder'):\n if model_name == 'vgg':\n upconv7 = upconv(skip[0], 512, 3, 2)\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n upconv6 = upconv(iconv7, 512, 3, 2)\n 
concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n elif model_name == 'resnet50':\n upconv6 = upconv(skip[0], 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n return disp1, disp2, disp3, disp4\n\n def build_outputs(self):\n with tf.variable_scope('disparities'):\n self.disp_lc = [tf.expand_dims(d[:, 
:, :, 0], 3) for d in self.\n disp_c2l]\n self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2l]\n self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.\n disp_c2r]\n self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2r]\n with tf.variable_scope('images'):\n self.left_est = [self.generate_image_left(self.central_pyramid[\n i], self.disp_lc[i]) for i in range(4)]\n self.cl_est = [self.generate_image_right(self.left_pyramid[i],\n self.disp_cl[i]) for i in range(4)]\n self.cr_est = [self.generate_image_left(self.right_pyramid[i],\n self.disp_cr[i]) for i in range(4)]\n self.right_est = [self.generate_image_right(self.\n central_pyramid[i], self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('left-right'):\n self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i],\n self.disp_lc[i]) for i in range(4)]\n self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i],\n self.disp_cl[i]) for i in range(4)]\n self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i],\n self.disp_cr[i]) for i in range(4)]\n self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i],\n self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('smoothness'):\n self.disp_lc_smoothness = self.get_disparity_smoothness(self.\n disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.\n disp_cl, self.central_pyramid)\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.\n disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.\n disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in\n self.l1_left]\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[\n i]) for i in range(4)]\n 
self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in\n self.l1_right]\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in\n self.l1_cl]\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in\n self.l1_cr]\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid\n [i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n self.ssim_right = [self.SSIM(self.right_est[i], self.\n right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n self.image_loss_right = [(self.params.alpha_image_loss * self.\n ssim_loss_right[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_right[i]) for i in range(4)]\n self.image_loss_left = [(self.params.alpha_image_loss * self.\n ssim_loss_left[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_left[i]) for i in range(4)]\n self.image_loss_cl = [(self.params.alpha_image_loss * self.\n ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cl[i]) for i in range(4)]\n self.image_loss_cr = [(self.params.alpha_image_loss * self.\n ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cr[i]) for i in range(4)]\n self.image_loss = tf.add_n(self.image_loss_left + self.\n image_loss_cl + self.image_loss_right + self.image_loss_cr)\n self.image_loss_L = tf.add_n(self.image_loss_left + self.\n image_loss_cl)\n self.image_loss_R = 
tf.add_n(self.image_loss_right + self.\n image_loss_cr)\n self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.\n disp_cr_loss)\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] -\n self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] -\n self.disp_cl[i])) for i in range(4)]\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] -\n self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] -\n self.disp_cr[i])) for i in range(4)]\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss +\n self.lr_rc_loss + self.lr_cr_loss)\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.\n disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n self.total_loss = (self.image_loss + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss + self.\n params.lr_loss_weight * self.lr_loss + self.\n central_disparity_loss)\n self.total_loss_L = (self.image_loss_L + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_L + \n self.params.lr_loss_weight * self.lr_loss_L)\n self.total_loss_R = (self.image_loss_R + 
self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_R + \n self.params.lr_loss_weight * self.lr_loss_R)\n\n def build_summaries(self):\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.\n ssim_loss_left[i] + self.ssim_loss_cl[i] + self.\n ssim_loss_right[i] + self.ssim_loss_cr[i], collections=\n self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.\n l1_reconstruction_loss_left[i] + self.\n l1_reconstruction_loss_cl[i] + self.\n l1_reconstruction_loss_right[i] + self.\n l1_reconstruction_loss_cr[i], collections=self.\n model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.\n image_loss_left[i] + self.image_loss_cl[i] + self.\n image_loss_right[i] + self.image_loss_cr[i],\n collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.\n disp_lc_loss[i] + self.disp_cl_loss[i] + self.\n disp_rc_loss[i] + self.disp_cr_loss[i], collections=\n self.model_collection)\n tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] +\n self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.\n lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L,\n collections=self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R,\n collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.\n central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i\n ], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), self.\n left_pyramid[i], max_outputs=4, 
collections=self.\n model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.\n central_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.\n right_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('l1_right_' + str(i), self.l1_right[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_cr_' + str(i), self.l1_cr[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4,\n collections=self.model_collection)\n",
"step-4": "from layers import *\nfrom utils import *\nfrom collections import namedtuple\ntrinet_parameters = namedtuple('parameters',\n 'encoder, height, width, batch_size, num_threads, num_epochs, alpha_image_loss, disp_gradient_loss_weight, lr_loss_weight, full_summary'\n )\n\n\nclass trinet(object):\n\n def __init__(self, params, mode, left, central, right, reuse_variables=\n None, model_index=0, net='vgg'):\n self.params = params\n self.mode = mode\n self.model_collection = ['model_0']\n self.left = left\n self.right = right\n self.central = central\n self.reuse_variables = reuse_variables\n self.model_index = model_index\n self.build_model(net)\n self.build_outputs()\n if self.mode == 'test':\n return\n self.build_losses()\n self.build_summaries()\n\n def gradient_x(self, img):\n gx = img[:, :, :-1, :] - img[:, :, 1:, :]\n return gx\n\n def gradient_y(self, img):\n gy = img[:, :-1, :, :] - img[:, 1:, :, :]\n return gy\n\n def scale_pyramid(self, img, num_scales):\n scaled_imgs = [img]\n s = tf.shape(img)\n h = s[1]\n w = s[2]\n for i in range(num_scales - 1):\n ratio = 2 ** (i + 1)\n nh = h // ratio\n nw = w // ratio\n scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))\n return scaled_imgs\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y, 3, 1, 'VALID') - mu_x * mu_y\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n SSIM = SSIM_n / SSIM_d\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n\n def get_disparity_smoothness(self, disp, pyramid):\n 
disp_gradients_x = [self.gradient_x(d) for d in disp]\n disp_gradients_y = [self.gradient_y(d) for d in disp]\n image_gradients_x = [self.gradient_x(img) for img in pyramid]\n image_gradients_y = [self.gradient_y(img) for img in pyramid]\n weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for\n g in image_gradients_x]\n weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for\n g in image_gradients_y]\n smoothness_x = [(disp_gradients_x[i] * weights_x[i]) for i in range(4)]\n smoothness_y = [(disp_gradients_y[i] * weights_y[i]) for i in range(4)]\n return smoothness_x + smoothness_y\n\n def build_model(self, net):\n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n with tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central, model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr, model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = self.build_decoder(features_cl, model_name=net)\n\n def build_encoder(self, model_input, model_name='vgg'):\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7)\n conv2 = conv_block(conv1, 64, 5)\n conv3 = conv_block(conv2, 128, 3)\n conv4 = conv_block(conv3, 256, 3)\n conv5 = conv_block(conv4, 512, 3)\n conv6 = conv_block(conv5, 512, 3)\n conv7 = conv_block(conv6, 512, 3)\n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2)\n pool1 = maxpool(conv1, 3)\n conv2 = resblock(pool1, 64, 3)\n conv3 = resblock(conv2, 128, 4)\n conv4 = resblock(conv3, 256, 6)\n conv5 = resblock(conv4, 512, 3)\n return conv5, conv1, pool1, conv2, conv3, conv4\n\n def build_decoder(self, 
skip, model_name='vgg'):\n with tf.variable_scope('decoder'):\n if model_name == 'vgg':\n upconv7 = upconv(skip[0], 512, 3, 2)\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n upconv6 = upconv(iconv7, 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n elif model_name == 'resnet50':\n upconv6 = upconv(skip[0], 512, 3, 2)\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n upconv5 = upconv(iconv6, 256, 3, 2)\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n upconv4 = upconv(iconv5, 128, 3, 2)\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n upconv3 = upconv(iconv4, 64, 3, 2)\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n upconv2 = upconv(iconv3, 32, 3, 2)\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n upconv1 = upconv(iconv2, 16, 3, 2)\n 
concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n return disp1, disp2, disp3, disp4\n\n def build_outputs(self):\n with tf.variable_scope('disparities'):\n self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.\n disp_c2l]\n self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2l]\n self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.\n disp_c2r]\n self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.\n disp_c2r]\n with tf.variable_scope('images'):\n self.left_est = [self.generate_image_left(self.central_pyramid[\n i], self.disp_lc[i]) for i in range(4)]\n self.cl_est = [self.generate_image_right(self.left_pyramid[i],\n self.disp_cl[i]) for i in range(4)]\n self.cr_est = [self.generate_image_left(self.right_pyramid[i],\n self.disp_cr[i]) for i in range(4)]\n self.right_est = [self.generate_image_right(self.\n central_pyramid[i], self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('left-right'):\n self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i],\n self.disp_lc[i]) for i in range(4)]\n self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i],\n self.disp_cl[i]) for i in range(4)]\n self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i],\n self.disp_cr[i]) for i in range(4)]\n self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i],\n self.disp_rc[i]) for i in range(4)]\n with tf.variable_scope('smoothness'):\n self.disp_lc_smoothness = self.get_disparity_smoothness(self.\n disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.\n disp_cl, self.central_pyramid)\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.\n disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.\n disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n self.l1_left = [tf.abs(self.left_est[i] - 
self.left_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in\n self.l1_left]\n self.l1_right = [tf.abs(self.right_est[i] - self.right_pyramid[\n i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in\n self.l1_right]\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in\n self.l1_cl]\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for\n i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in\n self.l1_cr]\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid\n [i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n self.ssim_right = [self.SSIM(self.right_est[i], self.\n right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[\n i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n self.image_loss_right = [(self.params.alpha_image_loss * self.\n ssim_loss_right[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_right[i]) for i in range(4)]\n self.image_loss_left = [(self.params.alpha_image_loss * self.\n ssim_loss_left[i] + (1 - self.params.alpha_image_loss) *\n self.l1_reconstruction_loss_left[i]) for i in range(4)]\n self.image_loss_cl = [(self.params.alpha_image_loss * self.\n ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cl[i]) for i in range(4)]\n self.image_loss_cr = [(self.params.alpha_image_loss * self.\n ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self\n .l1_reconstruction_loss_cr[i]) for i in range(4)]\n self.image_loss = 
tf.add_n(self.image_loss_left + self.\n image_loss_cl + self.image_loss_right + self.image_loss_cr)\n self.image_loss_L = tf.add_n(self.image_loss_left + self.\n image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.\n image_loss_cr)\n self.disp_lc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_lc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cl_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cl_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_rc_loss = [(tf.reduce_mean(tf.abs(self.\n disp_rc_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_cr_loss = [(tf.reduce_mean(tf.abs(self.\n disp_cr_smoothness[i])) / 2 ** i) for i in range(4)]\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.\n disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.\n disp_cr_loss)\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] -\n self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] -\n self.disp_cl[i])) for i in range(4)]\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] -\n self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] -\n self.disp_cr[i])) for i in range(4)]\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss +\n self.lr_rc_loss + self.lr_cr_loss)\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.\n disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n self.total_loss = (self.image_loss + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss + self.\n params.lr_loss_weight * self.lr_loss + self.\n central_disparity_loss)\n 
self.total_loss_L = (self.image_loss_L + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_L + \n self.params.lr_loss_weight * self.lr_loss_L)\n self.total_loss_R = (self.image_loss_R + self.params.\n disp_gradient_loss_weight * self.disp_gradient_loss_R + \n self.params.lr_loss_weight * self.lr_loss_R)\n\n def build_summaries(self):\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.\n ssim_loss_left[i] + self.ssim_loss_cl[i] + self.\n ssim_loss_right[i] + self.ssim_loss_cr[i], collections=\n self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.\n l1_reconstruction_loss_left[i] + self.\n l1_reconstruction_loss_cl[i] + self.\n l1_reconstruction_loss_right[i] + self.\n l1_reconstruction_loss_cr[i], collections=self.\n model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.\n image_loss_left[i] + self.image_loss_cl[i] + self.\n image_loss_right[i] + self.image_loss_cr[i],\n collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.\n disp_lc_loss[i] + self.disp_cl_loss[i] + self.\n disp_rc_loss[i] + self.disp_cr_loss[i], collections=\n self.model_collection)\n tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] +\n self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.\n lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L,\n collections=self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R,\n collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.\n central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i\n ], max_outputs=4, 
collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), self.\n left_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.\n central_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.\n right_pyramid[i], max_outputs=4, collections=self.\n model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('l1_right_' + str(i), self.l1_right[i],\n max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_cr_' + str(i), self.l1_cr[i],\n max_outputs=4, collections=self.model_collection)\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4,\n collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4,\n collections=self.model_collection)\n",
"step-5": "#\n# MIT License\n#\n# Copyright (c) 2018 Matteo Poggi m.poggi@unibo.it\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom layers import *\nfrom utils import *\nfrom collections import namedtuple\n\ntrinet_parameters = namedtuple('parameters',\n 'encoder, '\n 'height, width, '\n 'batch_size, '\n 'num_threads, '\n 'num_epochs, '\n 'alpha_image_loss, '\n 'disp_gradient_loss_weight, '\n 'lr_loss_weight, '\n 'full_summary')\n\nclass trinet(object):\n\n def __init__(self,params, mode, left, central, right, reuse_variables=None, model_index=0, net='vgg'):\n self.params = params\n self.mode = mode\n self.model_collection = ['model_0']\n self.left = left\n self.right = right\n self.central = central\n self.reuse_variables = reuse_variables\n self.model_index = model_index\n\n self.build_model(net)\n self.build_outputs()\n if self.mode == 'test':\n return\n\n self.build_losses()\n self.build_summaries()\n\n def gradient_x(self, img):\n gx = 
img[:,:,:-1,:] - img[:,:,1:,:]\n return gx\n\n def gradient_y(self, img):\n gy = img[:,:-1,:,:] - img[:,1:,:,:]\n return gy\n\n def scale_pyramid(self, img, num_scales):\n scaled_imgs = [img]\n s = tf.shape(img)\n h = s[1]\n w = s[2]\n for i in range(num_scales - 1):\n ratio = 2 ** (i + 1)\n nh = h // ratio\n nw = w // ratio\n scaled_imgs.append(tf.image.resize_area(img, [nh, nw]))\n return scaled_imgs\n\n def generate_image_left(self, img, disp):\n return bilinear_sampler_1d_h(img, -disp)\n\n def generate_image_right(self, img, disp):\n return bilinear_sampler_1d_h(img, disp)\n\n def SSIM(self, x, y):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n\n mu_x = slim.avg_pool2d(x, 3, 1, 'VALID')\n mu_y = slim.avg_pool2d(y, 3, 1, 'VALID')\n\n sigma_x = slim.avg_pool2d(x ** 2, 3, 1, 'VALID') - mu_x ** 2\n sigma_y = slim.avg_pool2d(y ** 2, 3, 1, 'VALID') - mu_y ** 2\n sigma_xy = slim.avg_pool2d(x * y , 3, 1, 'VALID') - mu_x * mu_y\n\n SSIM_n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)\n SSIM_d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)\n\n SSIM = SSIM_n / SSIM_d\n\n return tf.clip_by_value((1 - SSIM) / 2, 0, 1)\n\n\n def get_disparity_smoothness(self, disp, pyramid):\n disp_gradients_x = [self.gradient_x(d) for d in disp]\n disp_gradients_y = [self.gradient_y(d) for d in disp]\n\n image_gradients_x = [self.gradient_x(img) for img in pyramid]\n image_gradients_y = [self.gradient_y(img) for img in pyramid]\n\n weights_x = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_x]\n weights_y = [tf.exp(-tf.reduce_mean(tf.abs(g), 3, keep_dims=True)) for g in image_gradients_y]\n\n smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(4)]\n smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(4)]\n return smoothness_x + smoothness_y\n\n # Build model\n def build_model(self,net): \n with tf.variable_scope('model', reuse=self.reuse_variables) as scope:\n self.left_pyramid = self.scale_pyramid(self.left, 4)\n # if self.mode 
== 'train':\n self.right_pyramid = self.scale_pyramid(self.right, 4)\n self.central_pyramid = self.scale_pyramid(self.central, 4)\n\n with tf.variable_scope('shared-encoder'):\n features_cr = self.build_encoder(self.central,model_name=net)\n features_cl = features_cr\n with tf.variable_scope('encoder-C2R'):\n self.disp_c2r = self.build_decoder(features_cr,model_name=net)\n with tf.variable_scope('encoder-C2L'):\n self.disp_c2l = self.build_decoder(features_cl,model_name=net)\n \n # Build shared encoder\n def build_encoder(self, model_input, model_name='vgg'):\n\n with tf.variable_scope('encoder'):\n if model_name == 'vgg':\n conv1 = conv_block(model_input, 32, 7) # H/2\n conv2 = conv_block(conv1, 64, 5) # H/4\n conv3 = conv_block(conv2, 128, 3) # H/8\n conv4 = conv_block(conv3, 256, 3) # H/16\n conv5 = conv_block(conv4, 512, 3) # H/32\n conv6 = conv_block(conv5, 512, 3) # H/64\n conv7 = conv_block(conv6, 512, 3) # H/128 \n return conv7, conv1, conv2, conv3, conv4, conv5, conv6\n\n elif model_name == 'resnet50':\n conv1 = conv(model_input, 64, 7, 2) # H/2 - 64D\n pool1 = maxpool(conv1, 3) # H/4 - 64D\n conv2 = resblock(pool1, 64, 3) # H/8 - 256D\n conv3 = resblock(conv2, 128, 4) # H/16 - 512D\n conv4 = resblock(conv3, 256, 6) # H/32 - 1024D\n conv5 = resblock(conv4, 512, 3) # H/64 - 2048D\n return conv5, conv1, pool1, conv2, conv3, conv4 \n\n def build_decoder(self, skip, model_name='vgg'):\n\n with tf.variable_scope('decoder'):\n if model_name == 'vgg': \n upconv7 = upconv(skip[0], 512, 3, 2) #H/64\n concat7 = tf.concat([upconv7, skip[6]], 3)\n iconv7 = conv(concat7, 512, 3, 1)\n\n upconv6 = upconv(iconv7, 512, 3, 2) #H/32\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n\n upconv5 = upconv(iconv6, 256, 3, 2) #H/16\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n\n upconv4 = upconv(iconv5, 128, 3, 2) #H/8\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = 
get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n\n upconv3 = upconv(iconv4, 64, 3, 2) #H/4\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n\n upconv2 = upconv(iconv3, 32, 3, 2) #H/2\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n\n upconv1 = upconv(iconv2, 16, 3, 2) #H\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n\n elif model_name == 'resnet50': \n upconv6 = upconv(skip[0], 512, 3, 2) #H/32\n concat6 = tf.concat([upconv6, skip[5]], 3)\n iconv6 = conv(concat6, 512, 3, 1)\n\n upconv5 = upconv(iconv6, 256, 3, 2) #H/16\n concat5 = tf.concat([upconv5, skip[4]], 3)\n iconv5 = conv(concat5, 256, 3, 1)\n\n upconv4 = upconv(iconv5, 128, 3, 2) #H/8\n concat4 = tf.concat([upconv4, skip[3]], 3)\n iconv4 = conv(concat4, 128, 3, 1)\n disp4 = get_disp(iconv4)\n udisp4 = upsample_nn(disp4, 2)\n\n upconv3 = upconv(iconv4, 64, 3, 2) #H/4\n concat3 = tf.concat([upconv3, skip[2], udisp4], 3)\n iconv3 = conv(concat3, 64, 3, 1)\n disp3 = get_disp(iconv3)\n udisp3 = upsample_nn(disp3, 2)\n\n upconv2 = upconv(iconv3, 32, 3, 2) #H/2\n concat2 = tf.concat([upconv2, skip[1], udisp3], 3)\n iconv2 = conv(concat2, 32, 3, 1)\n disp2 = get_disp(iconv2)\n udisp2 = upsample_nn(disp2, 2)\n\n upconv1 = upconv(iconv2, 16, 3, 2) #H\n concat1 = tf.concat([upconv1, udisp2], 3)\n iconv1 = conv(concat1, 16, 3, 1)\n disp1 = get_disp(iconv1)\n\n return disp1, disp2, disp3, disp4 \n def build_outputs(self):\n #self.disparity_cr = self.disp_cr[0][0,:,:,0]\n #self.disparity_cl = self.disp_cl[0][0,:,:,0]\n #self.warp_left = generate_image_left(self.placeholders['im0'], self.disparity_cl)[0]\n #self.warp_right = generate_image_right(self.placeholders['im0'], self.disparity_cr)[0]\n\n # STORE DISPARITIES\n with tf.variable_scope('disparities'):\n\n 
self.disp_lc = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2l]\n self.disp_cl = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2l]\n\n self.disp_cr = [tf.expand_dims(d[:, :, :, 0], 3) for d in self.disp_c2r]\n self.disp_rc = [tf.expand_dims(d[:, :, :, 1], 3) for d in self.disp_c2r]\n\n # GENERATE IMAGES\n with tf.variable_scope('images'):\n self.left_est = [self.generate_image_left(self.central_pyramid[i], self.disp_lc[i]) for i in range(4)]\n self.cl_est = [self.generate_image_right(self.left_pyramid[i], self.disp_cl[i]) for i in range(4)]\n\n self.cr_est = [self.generate_image_left(self.right_pyramid[i], self.disp_cr[i]) for i in range(4)]\n self.right_est = [self.generate_image_right(self.central_pyramid[i], self.disp_rc[i]) for i in range(4)]\n\n # LR CONSISTENCY\n with tf.variable_scope('left-right'):\n self.cl_to_lc_disp = [self.generate_image_left(self.disp_cl[i], self.disp_lc[i]) for i in range(4)]\n self.lc_to_cl_disp = [self.generate_image_right(self.disp_lc[i], self.disp_cl[i]) for i in range(4)]\n\n self.rc_to_cr_disp = [self.generate_image_left(self.disp_rc[i], self.disp_cr[i]) for i in range(4)]\n self.cr_to_rc_disp = [self.generate_image_right(self.disp_cr[i], self.disp_rc[i]) for i in range(4)]\n\n # DISPARITY SMOOTHNESS\n with tf.variable_scope('smoothness'):\n self.disp_lc_smoothness = self.get_disparity_smoothness(self.disp_lc, self.left_pyramid)\n self.disp_cl_smoothness = self.get_disparity_smoothness(self.disp_cl, self.central_pyramid)\n\n self.disp_cr_smoothness = self.get_disparity_smoothness(self.disp_cr, self.central_pyramid)\n self.disp_rc_smoothness = self.get_disparity_smoothness(self.disp_rc, self.right_pyramid)\n\n def build_losses(self):\n with tf.variable_scope('losses', reuse=self.reuse_variables):\n # IMAGE RECONSTRUCTION\n # L1\n self.l1_left = [tf.abs(self.left_est[i] - self.left_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_left = [tf.reduce_mean(l) for l in self.l1_left]\n\n self.l1_right = 
[tf.abs(self.right_est[i] - self.right_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_right = [tf.reduce_mean(l) for l in self.l1_right]\n\n self.l1_cl = [tf.abs(self.cl_est[i] - self.central_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_cl = [tf.reduce_mean(l) for l in self.l1_cl]\n\n self.l1_cr = [tf.abs(self.cr_est[i] - self.central_pyramid[i]) for i in range(4)]\n self.l1_reconstruction_loss_cr = [tf.reduce_mean(l) for l in self.l1_cr]\n\n # SSIM\n self.ssim_left = [self.SSIM(self.left_est[i], self.left_pyramid[i]) for i in range(4)]\n self.ssim_loss_left = [tf.reduce_mean(s) for s in self.ssim_left]\n\n self.ssim_right = [self.SSIM(self.right_est[i], self.right_pyramid[i]) for i in range(4)]\n self.ssim_loss_right = [tf.reduce_mean(s) for s in self.ssim_right]\n\n self.ssim_cl = [self.SSIM(self.cl_est[i], self.central_pyramid[i]) for i in range(4)]\n self.ssim_loss_cl = [tf.reduce_mean(s) for s in self.ssim_cl]\n\n self.ssim_cr = [self.SSIM(self.cr_est[i], self.central_pyramid[i]) for i in range(4)]\n self.ssim_loss_cr = [tf.reduce_mean(s) for s in self.ssim_cr]\n\n # WEIGTHED SUM\n self.image_loss_right = [self.params.alpha_image_loss * self.ssim_loss_right[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_right[i] for i in range(4)]\n self.image_loss_left = [self.params.alpha_image_loss * self.ssim_loss_left[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_left[i] for i in range(4)]\n self.image_loss_cl = [self.params.alpha_image_loss * self.ssim_loss_cl[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cl[i] for i in range(4)]\n self.image_loss_cr = [self.params.alpha_image_loss * self.ssim_loss_cr[i] + (1 - self.params.alpha_image_loss) * self.l1_reconstruction_loss_cr[i] for i in range(4)]\n\n self.image_loss = tf.add_n(self.image_loss_left + self.image_loss_cl + self.image_loss_right + self.image_loss_cr)\n\n self.image_loss_L = tf.add_n(self.image_loss_left + 
self.image_loss_cl)\n self.image_loss_R = tf.add_n(self.image_loss_right + self.image_loss_cr)\n\n\n # DISPARITY SMOOTHNESS\n self.disp_lc_loss = [tf.reduce_mean(tf.abs(self.disp_lc_smoothness[i])) / 2 ** i for i in range(4)]\n self.disp_cl_loss = [tf.reduce_mean(tf.abs(self.disp_cl_smoothness[i])) / 2 ** i for i in range(4)]\n\n self.disp_rc_loss = [tf.reduce_mean(tf.abs(self.disp_rc_smoothness[i])) / 2 ** i for i in range(4)]\n self.disp_cr_loss = [tf.reduce_mean(tf.abs(self.disp_cr_smoothness[i])) / 2 ** i for i in range(4)]\n\n self.disp_gradient_loss = tf.add_n(self.disp_lc_loss + self.disp_cl_loss + self.disp_rc_loss + self.disp_cr_loss)\n\n self.disp_gradient_loss_L = tf.add_n(self.disp_lc_loss + self.disp_cl_loss)\n self.disp_gradient_loss_R = tf.add_n(self.disp_rc_loss + self.disp_cr_loss)\n\n\n # LR CONSISTENCY\n self.lr_lc_loss = [tf.reduce_mean(tf.abs(self.cl_to_lc_disp[i] - self.disp_lc[i])) for i in range(4)]\n self.lr_cl_loss = [tf.reduce_mean(tf.abs(self.lc_to_cl_disp[i] - self.disp_cl[i])) for i in range(4)]\n\n self.lr_rc_loss = [tf.reduce_mean(tf.abs(self.cr_to_rc_disp[i] - self.disp_rc[i])) for i in range(4)]\n self.lr_cr_loss = [tf.reduce_mean(tf.abs(self.rc_to_cr_disp[i] - self.disp_cr[i])) for i in range(4)]\n\n\n self.lr_loss = tf.add_n(self.lr_lc_loss + self.lr_cl_loss + self.lr_rc_loss + self.lr_cr_loss)\n\n self.lr_loss_L = tf.add_n(self.lr_lc_loss + self.lr_cl_loss)\n self.lr_loss_R = tf.add_n(self.lr_rc_loss + self.lr_cr_loss)\n\n # CENTRAL DISPARITY CONSISTENCY\n self.central_disparity_dif = [tf.reduce_mean(tf.abs(self.disp_cl[i] - self.disp_cr[i])) for i in range(4)]\n self.central_disparity_loss = tf.add_n(self.central_disparity_dif)\n\n # TOTAL LOSS\n self.total_loss = self.image_loss + self.params.disp_gradient_loss_weight * self.disp_gradient_loss + self.params.lr_loss_weight * self.lr_loss + self.central_disparity_loss\n\n self.total_loss_L = self.image_loss_L + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_L + 
self.params.lr_loss_weight * self.lr_loss_L\n self.total_loss_R = self.image_loss_R + self.params.disp_gradient_loss_weight * self.disp_gradient_loss_R + self.params.lr_loss_weight * self.lr_loss_R\n\n def build_summaries(self):\n # SUMMARIES\n with tf.device('/cpu:0'):\n for i in range(4):\n tf.summary.scalar('ssim_loss_' + str(i), self.ssim_loss_left[i] + self.ssim_loss_cl[i] + self.ssim_loss_right[i] + self.ssim_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('l1_loss_' + str(i), self.l1_reconstruction_loss_left[i] + self.l1_reconstruction_loss_cl[i] + self.l1_reconstruction_loss_right[i] + self.l1_reconstruction_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('image_loss_' + str(i), self.image_loss_left[i] + self.image_loss_cl[i] + self.image_loss_right[i] + self.image_loss_cr[i], collections=self.model_collection)\n tf.summary.scalar('disp_gradient_loss_' + str(i), self.disp_lc_loss[i] + self.disp_cl_loss[i] + self.disp_rc_loss[i] + self.disp_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('lr_loss_' + str(i), self.lr_lc_loss[i] + self.lr_cl_loss[i] + self.lr_rc_loss[i] + self.lr_cr_loss[i], collections=self.model_collection)\n tf.summary.scalar('total_loss_L', self.total_loss_L, collections= self.model_collection)\n tf.summary.scalar('total_loss_R', self.total_loss_R, collections=self.model_collection)\n tf.summary.scalar('central_disparity_loss', self.central_disparity_loss, collections=self.model_collection)\n tf.summary.image('disp_left_est_' + str(i), self.disp_lc[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cl_est_' + str(i), self.disp_cl[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_right_est_' + str(i), self.disp_rc[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('disp_cr_est_' + str(i), self.disp_cr[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_pyramid_' + str(i), 
self.left_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('central_pyramid_' + str(i), self.central_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_pyramid_' + str(i), self.right_pyramid[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)\n\n if self.params.full_summary:\n #tf.summary.image('left_est_' + str(i), self.left_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('right_est_' + str(i), self.right_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('cl_est_' + str(i), self.cl_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('cr_est_' + str(i), self.cr_est[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_left_' + str(i), self.ssim_left[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_right_' + str(i), self.ssim_right[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_cl_' + str(i), self.ssim_cl[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('ssim_cr_' + str(i), self.ssim_cr[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('l1_left_' + str(i), self.l1_left[i], max_outputs=4, collections=self.model_collection)\n tf.summary.image('l1_right_' + str(i), self.l1_right[i], max_outputs=4, collections=self.model_collection)\n #tf.summary.image('l1_cl_' + str(i), self.l1_cl[i], max_outputs=4, collections=self.model_collection)\n 
tf.summary.image('l1_cr_' + str(i), self.l1_cr[i], max_outputs=4, collections=self.model_collection)\n\n if self.params.full_summary:\n tf.summary.image('left', self.left, max_outputs=4, collections=self.model_collection)\n tf.summary.image('right', self.right, max_outputs=4, collections=self.model_collection)\n tf.summary.image('central', self.central, max_outputs=4, collections=self.model_collection)",
"step-ids": [
8,
14,
15,
17,
18
]
}
|
[
8,
14,
15,
17,
18
] |
<|reserved_special_token_0|>
def Get_Attachments(service, userId, msg_id, store_dir):
    """Get and store every attachment of the Message with the given id.

    Walks the full MIME part tree (nested multiparts included), decodes
    each named part's payload and writes it into store_dir.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        msg_id: ID of Message containing attachment.
        store_dir: The directory used to store attachments.
    """
    try:
        message = service.users().messages().get(
            userId=userId, id=msg_id).execute()
        # Depth-first walk over the MIME tree; multiparts push their children.
        parts = [message['payload']]
        while parts:
            part = parts.pop()
            if part.get('parts'):
                parts.extend(part['parts'])
            if part.get('filename'):
                if 'data' in part['body']:
                    # Small attachments are inlined in the message payload.
                    file_data = base64.urlsafe_b64decode(
                        part['body']['data'].encode('UTF-8'))
                elif 'attachmentId' in part['body']:
                    # Large attachments have to be fetched separately.
                    attachment = service.users().messages().attachments().get(
                        userId=userId, messageId=message['id'],
                        id=part['body']['attachmentId']).execute()
                    file_data = base64.urlsafe_b64decode(
                        attachment['data'].encode('UTF-8'))
                else:
                    file_data = None
                if file_data:
                    # os.path.join (instead of plain string concatenation)
                    # works whether or not store_dir ends with a separator,
                    # and basename() stops a crafted attachment name from
                    # escaping store_dir via path components.
                    path = os.path.join(store_dir,
                                        os.path.basename(part['filename']))
                    with open(path, 'wb') as f:
                        f.write(file_data)
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
<|reserved_special_token_0|>
def Delete_Message(service, userId, message_id):
    """Permanently delete a message, bypassing the trash.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.
    """
    messages_resource = service.users().messages()
    messages_resource.delete(userId=userId, id=message_id).execute()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Get_Attachments(service, userId, msg_id, store_dir):
    """Get and store every attachment of the Message with the given id.

    Walks the full MIME part tree (nested multiparts included), decodes
    each named part's payload and writes it into store_dir.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        msg_id: ID of Message containing attachment.
        store_dir: The directory used to store attachments.
    """
    try:
        message = service.users().messages().get(
            userId=userId, id=msg_id).execute()
        # Depth-first walk over the MIME tree; multiparts push their children.
        parts = [message['payload']]
        while parts:
            part = parts.pop()
            if part.get('parts'):
                parts.extend(part['parts'])
            if part.get('filename'):
                if 'data' in part['body']:
                    # Small attachments are inlined in the message payload.
                    file_data = base64.urlsafe_b64decode(
                        part['body']['data'].encode('UTF-8'))
                elif 'attachmentId' in part['body']:
                    # Large attachments have to be fetched separately.
                    attachment = service.users().messages().attachments().get(
                        userId=userId, messageId=message['id'],
                        id=part['body']['attachmentId']).execute()
                    file_data = base64.urlsafe_b64decode(
                        attachment['data'].encode('UTF-8'))
                else:
                    file_data = None
                if file_data:
                    # os.path.join (instead of plain string concatenation)
                    # works whether or not store_dir ends with a separator,
                    # and basename() stops a crafted attachment name from
                    # escaping store_dir via path components.
                    path = os.path.join(store_dir,
                                        os.path.basename(part['filename']))
                    with open(path, 'wb') as f:
                        f.write(file_data)
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def Reply_With_Attchment(service, userId, receiver, subject, message,
    attachments, threadId, message_id):
    """Reply within an existing thread, optionally attaching one file.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        receiver: Email address of who to send to.
        subject: Email subject.
        message: Email message, plain text
        attachments: Path of the file to attach, or None for no attachment.
        threadId: Used to match reply with message thread
        message_id: RFC 2822 Message-ID of the message being replied to.
    """
    mimeMessage = MIMEMultipart()
    mimeMessage['to'] = receiver
    mimeMessage['subject'] = subject
    mimeMessage['threadId'] = threadId
    # In-Reply-To/References are what make mail clients thread the reply.
    mimeMessage['In-Reply-To'] = message_id
    mimeMessage['References'] = message_id
    mimeMessage.attach(MIMEText(message, 'plain'))
    if attachments is not None:
        attachment = attachments
        content_type, _ = mimetypes.guess_type(attachment)
        if content_type is None:
            # Unknown extension: fall back to a generic binary type rather
            # than crashing on None.split('/').
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        file_name = os.path.basename(attachment)
        myFile = MIMEBase(main_type, sub_type)
        # "with" guarantees the handle is closed even if reading raises.
        with open(attachment, 'rb') as f:
            myFile.set_payload(f.read())
        myFile.add_header('Content-Disposition', 'attachment', filename=
            file_name)
        encoders.encode_base64(myFile)
        mimeMessage.attach(myFile)
    raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).
        decode()}
    # threadId must be set on the request body for Gmail to keep the thread.
    raw_string['threadId'] = threadId
    service.users().messages().send(userId=userId, body=raw_string).execute()
<|reserved_special_token_0|>
def Get_Message_Info(service, userId, message_id):
    """Retrieves received message info, returns tuple.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.

    Returns:
        Tuple of (sender, subject, thread_id, rfc_message_id,
        attachment_list, ID); attachment_list holds the filenames of the
        PDF attachments. sender/subject are None when the header is absent.
    """
    message_info = service.users().messages().get(
        userId=userId, id=message_id).execute()
    ID = message_info['id']
    thread_id = message_info['threadId']
    # Headers are not guaranteed; default to None instead of raising
    # NameError when From/Subject is missing. If Message-ID is missing the
    # Gmail id passed in is kept as a fallback.
    sender = subject = None
    for header in message_info['payload']['headers']:
        if header['name'] == 'Message-ID':
            message_id = header['value']
        elif header['name'] == 'From':
            sender = header['value']
        elif header['name'] == 'Subject':
            subject = header['value']
    # Part-less payloads carry no 'parts' key at all.
    attachment_list = [part['filename']
                       for part in message_info['payload'].get('parts', [])
                       if part['mimeType'] == 'application/pdf']
    return (sender, subject, thread_id, message_id, attachment_list, ID)
def Delete_Message(service, userId, message_id):
    """Permanently delete a message, bypassing the trash.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.
    """
    messages_resource = service.users().messages()
    messages_resource.delete(userId=userId, id=message_id).execute()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Get_Attachments(service, userId, msg_id, store_dir):
    """Get and store every attachment of the Message with the given id.

    Walks the full MIME part tree (nested multiparts included), decodes
    each named part's payload and writes it into store_dir.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        msg_id: ID of Message containing attachment.
        store_dir: The directory used to store attachments.
    """
    try:
        message = service.users().messages().get(
            userId=userId, id=msg_id).execute()
        # Depth-first walk over the MIME tree; multiparts push their children.
        parts = [message['payload']]
        while parts:
            part = parts.pop()
            if part.get('parts'):
                parts.extend(part['parts'])
            if part.get('filename'):
                if 'data' in part['body']:
                    # Small attachments are inlined in the message payload.
                    file_data = base64.urlsafe_b64decode(
                        part['body']['data'].encode('UTF-8'))
                elif 'attachmentId' in part['body']:
                    # Large attachments have to be fetched separately.
                    attachment = service.users().messages().attachments().get(
                        userId=userId, messageId=message['id'],
                        id=part['body']['attachmentId']).execute()
                    file_data = base64.urlsafe_b64decode(
                        attachment['data'].encode('UTF-8'))
                else:
                    file_data = None
                if file_data:
                    # os.path.join (instead of plain string concatenation)
                    # works whether or not store_dir ends with a separator,
                    # and basename() stops a crafted attachment name from
                    # escaping store_dir via path components.
                    path = os.path.join(store_dir,
                                        os.path.basename(part['filename']))
                    with open(path, 'wb') as f:
                        f.write(file_data)
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def Reply_With_Attchment(service, userId, receiver, subject, message,
    attachments, threadId, message_id):
    """Reply within an existing thread, optionally attaching one file.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        receiver: Email address of who to send to.
        subject: Email subject.
        message: Email message, plain text
        attachments: Path of the file to attach, or None for no attachment.
        threadId: Used to match reply with message thread
        message_id: RFC 2822 Message-ID of the message being replied to.
    """
    mimeMessage = MIMEMultipart()
    mimeMessage['to'] = receiver
    mimeMessage['subject'] = subject
    mimeMessage['threadId'] = threadId
    # In-Reply-To/References are what make mail clients thread the reply.
    mimeMessage['In-Reply-To'] = message_id
    mimeMessage['References'] = message_id
    mimeMessage.attach(MIMEText(message, 'plain'))
    if attachments is not None:
        attachment = attachments
        content_type, _ = mimetypes.guess_type(attachment)
        if content_type is None:
            # Unknown extension: fall back to a generic binary type rather
            # than crashing on None.split('/').
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        file_name = os.path.basename(attachment)
        myFile = MIMEBase(main_type, sub_type)
        # "with" guarantees the handle is closed even if reading raises.
        with open(attachment, 'rb') as f:
            myFile.set_payload(f.read())
        myFile.add_header('Content-Disposition', 'attachment', filename=
            file_name)
        encoders.encode_base64(myFile)
        mimeMessage.attach(myFile)
    raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).
        decode()}
    # threadId must be set on the request body for Gmail to keep the thread.
    raw_string['threadId'] = threadId
    service.users().messages().send(userId=userId, body=raw_string).execute()
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.

    Returns:
        List of message id strings (possibly empty).
    """
    response = service.users().messages().list(userId=userId, labelIds=
        'INBOX', alt='json', q='is:unread has:attachment').execute()
    # 'messages' is simply absent (not an empty list) when nothing matches,
    # and resultSizeEstimate is only an estimate — key off the list itself
    # to avoid a KeyError when the estimate is positive but no ids came back.
    return [message['id'] for message in response.get('messages', [])]
def Get_Message_Info(service, userId, message_id):
    """Retrieves received message info, returns tuple.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.

    Returns:
        Tuple of (sender, subject, thread_id, rfc_message_id,
        attachment_list, ID); attachment_list holds the filenames of the
        PDF attachments. sender/subject are None when the header is absent.
    """
    message_info = service.users().messages().get(
        userId=userId, id=message_id).execute()
    ID = message_info['id']
    thread_id = message_info['threadId']
    # Headers are not guaranteed; default to None instead of raising
    # NameError when From/Subject is missing. If Message-ID is missing the
    # Gmail id passed in is kept as a fallback.
    sender = subject = None
    for header in message_info['payload']['headers']:
        if header['name'] == 'Message-ID':
            message_id = header['value']
        elif header['name'] == 'From':
            sender = header['value']
        elif header['name'] == 'Subject':
            subject = header['value']
    # Part-less payloads carry no 'parts' key at all.
    attachment_list = [part['filename']
                       for part in message_info['payload'].get('parts', [])
                       if part['mimeType'] == 'application/pdf']
    return (sender, subject, thread_id, message_id, attachment_list, ID)
def Delete_Message(service, userId, message_id):
    """Permanently delete a message, bypassing the trash.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.
    """
    messages_resource = service.users().messages()
    messages_resource.delete(userId=userId, id=message_id).execute()
<|reserved_special_token_1|>
import base64
from apiclient import errors
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import mimetypes
def Get_Attachments(service, userId, msg_id, store_dir):
    """Get and store every attachment of the Message with the given id.

    Walks the full MIME part tree (nested multiparts included), decodes
    each named part's payload and writes it into store_dir.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        msg_id: ID of Message containing attachment.
        store_dir: The directory used to store attachments.
    """
    try:
        message = service.users().messages().get(
            userId=userId, id=msg_id).execute()
        # Depth-first walk over the MIME tree; multiparts push their children.
        parts = [message['payload']]
        while parts:
            part = parts.pop()
            if part.get('parts'):
                parts.extend(part['parts'])
            if part.get('filename'):
                if 'data' in part['body']:
                    # Small attachments are inlined in the message payload.
                    file_data = base64.urlsafe_b64decode(
                        part['body']['data'].encode('UTF-8'))
                elif 'attachmentId' in part['body']:
                    # Large attachments have to be fetched separately.
                    attachment = service.users().messages().attachments().get(
                        userId=userId, messageId=message['id'],
                        id=part['body']['attachmentId']).execute()
                    file_data = base64.urlsafe_b64decode(
                        attachment['data'].encode('UTF-8'))
                else:
                    file_data = None
                if file_data:
                    # os.path.join (instead of plain string concatenation)
                    # works whether or not store_dir ends with a separator,
                    # and basename() stops a crafted attachment name from
                    # escaping store_dir via path components.
                    path = os.path.join(store_dir,
                                        os.path.basename(part['filename']))
                    with open(path, 'wb') as f:
                        f.write(file_data)
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def Reply_With_Attchment(service, userId, receiver, subject, message,
    attachments, threadId, message_id):
    """Reply within an existing thread, optionally attaching one file.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        receiver: Email address of who to send to.
        subject: Email subject.
        message: Email message, plain text
        attachments: Path of the file to attach, or None for no attachment.
        threadId: Used to match reply with message thread
        message_id: RFC 2822 Message-ID of the message being replied to.
    """
    mimeMessage = MIMEMultipart()
    mimeMessage['to'] = receiver
    mimeMessage['subject'] = subject
    mimeMessage['threadId'] = threadId
    # In-Reply-To/References are what make mail clients thread the reply.
    mimeMessage['In-Reply-To'] = message_id
    mimeMessage['References'] = message_id
    mimeMessage.attach(MIMEText(message, 'plain'))
    if attachments is not None:
        attachment = attachments
        content_type, _ = mimetypes.guess_type(attachment)
        if content_type is None:
            # Unknown extension: fall back to a generic binary type rather
            # than crashing on None.split('/').
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        file_name = os.path.basename(attachment)
        myFile = MIMEBase(main_type, sub_type)
        # "with" guarantees the handle is closed even if reading raises.
        with open(attachment, 'rb') as f:
            myFile.set_payload(f.read())
        myFile.add_header('Content-Disposition', 'attachment', filename=
            file_name)
        encoders.encode_base64(myFile)
        mimeMessage.attach(myFile)
    raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).
        decode()}
    # threadId must be set on the request body for Gmail to keep the thread.
    raw_string['threadId'] = threadId
    service.users().messages().send(userId=userId, body=raw_string).execute()
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.

    Returns:
        List of message id strings (possibly empty).
    """
    response = service.users().messages().list(userId=userId, labelIds=
        'INBOX', alt='json', q='is:unread has:attachment').execute()
    # 'messages' is simply absent (not an empty list) when nothing matches,
    # and resultSizeEstimate is only an estimate — key off the list itself
    # to avoid a KeyError when the estimate is positive but no ids came back.
    return [message['id'] for message in response.get('messages', [])]
def Get_Message_Info(service, userId, message_id):
    """Retrieves received message info, returns tuple.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.

    Returns:
        Tuple of (sender, subject, thread_id, rfc_message_id,
        attachment_list, ID); attachment_list holds the filenames of the
        PDF attachments. sender/subject are None when the header is absent.
    """
    message_info = service.users().messages().get(
        userId=userId, id=message_id).execute()
    ID = message_info['id']
    thread_id = message_info['threadId']
    # Headers are not guaranteed; default to None instead of raising
    # NameError when From/Subject is missing. If Message-ID is missing the
    # Gmail id passed in is kept as a fallback.
    sender = subject = None
    for header in message_info['payload']['headers']:
        if header['name'] == 'Message-ID':
            message_id = header['value']
        elif header['name'] == 'From':
            sender = header['value']
        elif header['name'] == 'Subject':
            subject = header['value']
    # Part-less payloads carry no 'parts' key at all.
    attachment_list = [part['filename']
                       for part in message_info['payload'].get('parts', [])
                       if part['mimeType'] == 'application/pdf']
    return (sender, subject, thread_id, message_id, attachment_list, ID)
def Delete_Message(service, userId, message_id):
    """Permanently delete a message, bypassing the trash.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.
    """
    messages_resource = service.users().messages()
    messages_resource.delete(userId=userId, id=message_id).execute()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import base64
from apiclient import errors
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import mimetypes
def Get_Attachments(service, userId, msg_id, store_dir):
    """Get and store every attachment of the Message with the given id.

    Walks the full MIME part tree (nested multiparts included), decodes
    each named part's payload and writes it into store_dir.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
            can be used to indicate the authenticated user.
        msg_id: ID of Message containing attachment.
        store_dir: The directory used to store attachments.
    """
    try:
        message = service.users().messages().get(
            userId=userId, id=msg_id).execute()
        # Depth-first walk over the MIME tree; multiparts push their children.
        parts = [message['payload']]
        while parts:
            part = parts.pop()
            if part.get('parts'):
                parts.extend(part['parts'])
            if part.get('filename'):
                if 'data' in part['body']:
                    # Small attachments are inlined in the message payload.
                    file_data = base64.urlsafe_b64decode(
                        part['body']['data'].encode('UTF-8'))
                elif 'attachmentId' in part['body']:
                    # Large attachments have to be fetched separately.
                    attachment = service.users().messages().attachments().get(
                        userId=userId, messageId=message['id'],
                        id=part['body']['attachmentId']).execute()
                    file_data = base64.urlsafe_b64decode(
                        attachment['data'].encode('UTF-8'))
                else:
                    file_data = None
                if file_data:
                    # os.path.join (instead of plain string concatenation)
                    # works whether or not store_dir ends with a separator,
                    # and basename() stops a crafted attachment name from
                    # escaping store_dir via path components.
                    path = os.path.join(store_dir,
                                        os.path.basename(part['filename']))
                    with open(path, 'wb') as f:
                        f.write(file_data)
    except errors.HttpError as error:
        print('An error occurred: %s' % error)
def Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):
    """Reply within an existing thread, optionally attaching one file.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        receiver: Email address of who to send to.
        subject: Email subject.
        message: Email message, plain text
        attachments: Path of the file to attach, or None for no attachment.
        threadId: Used to match reply with message thread
        message_id: RFC 2822 Message-ID of the message being replied to.
    """
    # Create email message
    mimeMessage = MIMEMultipart()
    mimeMessage['to'] = receiver
    mimeMessage['subject'] = subject
    mimeMessage['threadId'] = threadId
    # In-Reply-To/References are what make mail clients thread the reply.
    mimeMessage['In-Reply-To'] = message_id
    mimeMessage['References'] = message_id
    mimeMessage.attach(MIMEText(message, 'plain'))
    # Attach files
    if attachments is not None:
        attachment = attachments
        content_type, _ = mimetypes.guess_type(attachment)
        if content_type is None:
            # Unknown extension: fall back to a generic binary type rather
            # than crashing on None.split('/').
            content_type = 'application/octet-stream'
        main_type, sub_type = content_type.split('/', 1)
        file_name = os.path.basename(attachment)
        myFile = MIMEBase(main_type, sub_type)
        # "with" guarantees the handle is closed even if reading raises.
        with open(attachment, 'rb') as f:
            myFile.set_payload(f.read())
        myFile.add_header('Content-Disposition', 'attachment', filename=file_name)
        encoders.encode_base64(myFile)
        mimeMessage.attach(myFile)
    raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}
    # threadId must be set on the request body for Gmail to keep the thread.
    raw_string['threadId']=threadId
    service.users().messages().send(userId=userId, body=raw_string).execute()
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.

    Returns:
        List of message id strings (possibly empty).
    """
    response = service.users().messages().list(userId=userId, labelIds='INBOX', alt="json", q='is:unread has:attachment').execute()
    # 'messages' is simply absent (not an empty list) when nothing matches,
    # and resultSizeEstimate is only an estimate — key off the list itself
    # to avoid a KeyError when the estimate is positive but no ids came back.
    return [message['id'] for message in response.get('messages', [])]
def Get_Message_Info(service, userId, message_id):
    """Retrieves received message info, returns tuple.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies specific message to interact with.

    Returns:
        Tuple of (sender, subject, thread_id, rfc_message_id,
        attachment_list, ID); attachment_list holds the filenames of the
        PDF attachments. sender/subject are None when the header is absent.
    """
    message_info = service.users().messages().get(userId=userId, id=message_id).execute()
    ID = message_info['id']
    thread_id = message_info['threadId']
    # Headers are not guaranteed; default to None instead of raising
    # NameError when From/Subject is missing. If Message-ID is missing the
    # Gmail id passed in is kept as a fallback.
    sender = subject = None
    for header in message_info['payload']['headers']:
        if header['name']=='Message-ID':
            message_id=header['value']
        elif header['name']=='From':
            sender=header['value']
        elif header['name']=='Subject':
            subject=header['value']
    # Part-less payloads carry no 'parts' key at all.
    attachment_list = [part['filename']
                       for part in message_info['payload'].get('parts', [])
                       if part['mimeType'] == 'application/pdf']
    return (sender, subject, thread_id, message_id, attachment_list, ID)
def Delete_Message(service, userId, message_id):
    """Permanently delete a message.
    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me".
            can be used to indicate the authenticated user.
        message_id: Identifies the specific message to delete.
    """
    messages_resource = service.users().messages()
    messages_resource.delete(userId=userId, id=message_id).execute()
|
flexible
|
{
"blob_id": "dee1ab3adb7f627680410c774be44ae196f63f6c",
"index": 587,
"step-1": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\n<mask token>\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-2": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\n<mask token>\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-3": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-4": "import base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-5": "#!/usr/bin/env python3\n\nimport base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n # Create email message\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n \n # Attach files\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n\n f = open(attachment, 'rb')\n\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=file_name)\n encoders.encode_base64(myFile)\n\n f.close()\n\n mimeMessage.attach(myFile)\n \n raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}\n raw_string['threadId']=threadId\n \n message = service.users().messages().send(userId=userId, body=raw_string).execute()\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt=\"json\", q='is:unread has:attachment').execute()\n \n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n\n return message_list\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id).execute()\n\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name']=='Message-ID':\n message_id=header['value']\n if header['name']=='From':\n sender=header['value']\n if header['name']=='Subject':\n subject=header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n\n info = (sender, subject, thread_id, message_id, attachment_list, ID)\n return info\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
__author__ = 'Jager'
from equipment import Equipment
class Weapon(Equipment):
    """A piece of equipment that additionally carries an attack power value."""

    def __init__(self, name, power):
        super(Weapon, self).__init__(name)
        self.power = power

    @staticmethod
    def fromJSON(jsonstr):
        """Build a Weapon from a JSON string parsed via Equipment.fromJSON."""
        data = Equipment.fromJSON(jsonstr)
        return Weapon(data["name"], data["power"])

    def __str__(self):
        return "{0}: Power({1})".format(self.name, self.power)
|
normal
|
{
"blob_id": "276d7ac493ddcb327dbce279d9f4bc8a74c98245",
"index": 5749,
"step-1": "<mask token>\n\n\nclass Weapon(Equipment):\n\n def __init__(self, name, power):\n super(Weapon, self).__init__(name)\n self.power = power\n <mask token>\n\n def __str__(self):\n return '{}: Power({})'.format(self.name, self.power)\n",
"step-2": "<mask token>\n\n\nclass Weapon(Equipment):\n\n def __init__(self, name, power):\n super(Weapon, self).__init__(name)\n self.power = power\n\n @staticmethod\n def fromJSON(jsonstr):\n obj = Equipment.fromJSON(jsonstr)\n return Weapon(obj['name'], obj['power'])\n\n def __str__(self):\n return '{}: Power({})'.format(self.name, self.power)\n",
"step-3": "__author__ = 'Jager'\n<mask token>\n\n\nclass Weapon(Equipment):\n\n def __init__(self, name, power):\n super(Weapon, self).__init__(name)\n self.power = power\n\n @staticmethod\n def fromJSON(jsonstr):\n obj = Equipment.fromJSON(jsonstr)\n return Weapon(obj['name'], obj['power'])\n\n def __str__(self):\n return '{}: Power({})'.format(self.name, self.power)\n",
"step-4": "__author__ = 'Jager'\nfrom equipment import Equipment\n\n\nclass Weapon(Equipment):\n\n def __init__(self, name, power):\n super(Weapon, self).__init__(name)\n self.power = power\n\n @staticmethod\n def fromJSON(jsonstr):\n obj = Equipment.fromJSON(jsonstr)\n return Weapon(obj['name'], obj['power'])\n\n def __str__(self):\n return '{}: Power({})'.format(self.name, self.power)\n",
"step-5": "__author__ = 'Jager'\nfrom equipment import Equipment\n\n\nclass Weapon (Equipment):\n def __init__(self, name, power):\n super(Weapon, self).__init__(name)\n self.power = power\n\n @staticmethod\n def fromJSON(jsonstr):\n obj = Equipment.fromJSON(jsonstr)\n return Weapon(obj[\"name\"], obj[\"power\"])\n\n def __str__(self):\n return \"{}: Power({})\".format(self.name, self.power)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
if (__name__ == "__main__"):
    # Background-subtraction based motion tracker: reads a video, extracts
    # moving-object contours per frame, and collects their centre points.
    cap = cv2.VideoCapture('dfd1.mp4')
    mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)

    while True:
        points = []  # per-contour [distance_from_origin, center_x, center_y]
        ret, frame = cap.read()
        ret1, frame1 = cap.read()
        fgmask = mog.apply(frame)
        mask = np.zeros_like(frame1)

        # Clean up the foreground mask: open to remove speckle noise, close
        # to fill small holes, then dilate to merge nearby fragments.
        kernel = np.ones((5, 5), np.uint8)
        opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        dilation = cv2.dilate(closing, kernel, iterations=1)

        canny = cv2.Canny(dilation, 100, 200)
        # OpenCV 3.x findContours returns (image, contours, hierarchy).
        cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)

        cv2.imshow('mask', fgmask)
        cv2.imshow('mask3', dilation)
        cv2.imshow('mask15', canny)
        cv2.imshow('mask4', frame)
        cv2.imshow('mask8', frame[100:160, 220:550])

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            X = int(x + w / 2)
            Y = int(y + h / 2)
            # Draw the contour centre as a degenerate (point) rectangle.
            cv2.rectangle(frame1, (X, Y), (X, Y), (255, 0, 0), 3)
            # BUG FIX: the original computed math.sqrt(X^2 + Y^2); in Python
            # '^' is bitwise XOR, not exponentiation.  Use ** for the
            # Euclidean distance from the origin.
            distance = math.sqrt(X ** 2 + Y ** 2)
            mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]

            points.append([distance, X, Y])
            # Drop consecutive duplicate centre coordinates.
            if len(points) == 1:
                print("List has one List")
            elif points[-1][1] == points[-2][1] and points[-1][2] == points[-2][2]:
                points.pop()

        # Sort ascending by distance from the origin.
        if not points:
            print("empty")
        else:
            points.sort()
            print(points)

        cv2.imshow('mask2', frame1)

        print('                                      장면 전환')
        cv2.imshow('mask7', mask)

        k = cv2.waitKey(300) & 0xFF
        if k == 27:  # Esc quits
            break

    cap.release()
    cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "28a0ae0492fb676044c1f9ced7a5a4819e99a8d9",
"index": 8890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n cap = cv2.VideoCapture('dfd1.mp4')\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\n count = 0\n while True:\n list = []\n ret, frame = cap.read()\n ret1, frame1 = cap.read()\n fgmask = mog.apply(frame)\n mask = np.zeros_like(frame1)\n mask1 = np.zeros_like(frame1)\n kernel = np.ones((5, 5), np.uint8)\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n dilation = cv2.dilate(closing, kernel, iterations=1)\n canny = cv2.Canny(dilation, 100, 200)\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\n cv2.imshow('mask', fgmask)\n cv2.imshow('mask3', dilation)\n cv2.imshow('mask15', canny)\n cv2.imshow('mask4', frame)\n cv2.imshow('mask8', frame[100:160, 220:550])\n for i in range(len(contours)):\n point = []\n cnt = contours[i]\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)), (int(x +\n w / 2), int(y + h / 2)), (255, 0, 0), 3)\n X = int(x + w / 2)\n Y = int(y + h / 2)\n distance = math.sqrt(X ^ 2 + Y ^ 2)\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\n point.append(distance)\n point.append(X)\n point.append(Y)\n list.append(point)\n if count == 0:\n print('List has one List')\n elif list[count][1] == list[count - 1][1] and list[count][2\n ] == list[count - 1][2]:\n a = list.pop()\n count = count - 1\n count = count + 1\n count = 0\n if not list:\n print('empty')\n else:\n list.sort()\n print(list)\n \"\"\"\n for i in range(len(list)):\n if count == 0:\n print(\"list 내용 한개\")\n else:\n #오름차순 정리된 점 거리 계산\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)\n print(count)\n print(list[count][1],list[count][2])\n print(list[count-1][1],list[count-1][2])\n print(\"거리 \",distance1)\n count = count + 1\n count = 0\n \"\"\"\n 
cv2.imshow('mask2', frame1)\n print(\n ' 장면 전환'\n )\n cv2.imshow('mask7', mask)\n k = cv2.waitKey(300) & 255\n if k == 27:\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-3": "import math\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist\nif __name__ == '__main__':\n cap = cv2.VideoCapture('dfd1.mp4')\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\n count = 0\n while True:\n list = []\n ret, frame = cap.read()\n ret1, frame1 = cap.read()\n fgmask = mog.apply(frame)\n mask = np.zeros_like(frame1)\n mask1 = np.zeros_like(frame1)\n kernel = np.ones((5, 5), np.uint8)\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n dilation = cv2.dilate(closing, kernel, iterations=1)\n canny = cv2.Canny(dilation, 100, 200)\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\n cv2.imshow('mask', fgmask)\n cv2.imshow('mask3', dilation)\n cv2.imshow('mask15', canny)\n cv2.imshow('mask4', frame)\n cv2.imshow('mask8', frame[100:160, 220:550])\n for i in range(len(contours)):\n point = []\n cnt = contours[i]\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)), (int(x +\n w / 2), int(y + h / 2)), (255, 0, 0), 3)\n X = int(x + w / 2)\n Y = int(y + h / 2)\n distance = math.sqrt(X ^ 2 + Y ^ 2)\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\n point.append(distance)\n point.append(X)\n point.append(Y)\n list.append(point)\n if count == 0:\n print('List has one List')\n elif list[count][1] == list[count - 1][1] and list[count][2\n ] == list[count - 1][2]:\n a = list.pop()\n count = count - 1\n count = count + 1\n count = 0\n if not list:\n print('empty')\n else:\n list.sort()\n print(list)\n \"\"\"\n for i in range(len(list)):\n if count == 0:\n print(\"list 내용 한개\")\n else:\n #오름차순 정리된 점 거리 계산\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) 
** 2)\n print(count)\n print(list[count][1],list[count][2])\n print(list[count-1][1],list[count-1][2])\n print(\"거리 \",distance1)\n count = count + 1\n count = 0\n \"\"\"\n cv2.imshow('mask2', frame1)\n print(\n ' 장면 전환'\n )\n cv2.imshow('mask7', mask)\n k = cv2.waitKey(300) & 255\n if k == 27:\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-4": "import math\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\nfrom scipy.spatial.distance import cdist\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n cap = cv2.VideoCapture('dfd1.mp4')\r\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\r\n count = 0\r\n\r\n #list = ['video' + str(n) for n in range(100)]\r\n while True:\r\n list = []\r\n ret, frame = cap.read()\r\n ret1, frame1 = cap.read()\r\n fgmask = mog.apply(frame)\r\n mask = np.zeros_like(frame1)\r\n mask1 = np.zeros_like(frame1)\r\n\r\n\r\n kernel = np.ones((5, 5), np.uint8)\r\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\n dilation = cv2.dilate(closing, kernel, iterations=1)\r\n\r\n canny = cv2.Canny(dilation, 100, 200)\r\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\r\n\r\n cv2.imshow('mask', fgmask)\r\n cv2.imshow('mask3', dilation)\r\n cv2.imshow('mask15', canny)\r\n cv2.imshow('mask4', frame)\r\n cv2.imshow('mask8', frame[100:160, 220:550])\r\n\r\n for i in range(len(contours)):\r\n point = []\r\n cnt = contours[i]\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n cv2.rectangle(frame1, (int(x+w/2), int(y+h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 0), 3)\r\n X = int(x+w/2)\r\n Y = int(y+h/2)\r\n distance = math.sqrt(X^2+Y^2)\r\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\r\n\r\n #(0,0)에서 좌표 거리 계산 후 리스트에 첨가\r\n point.append(distance)\r\n point.append(X)\r\n point.append(Y)\r\n list.append(point)\r\n\r\n #같은 좌표 값 제거\r\n if count == 0:\r\n print(\"List has one List\")\r\n elif list[count][1] == list[count-1][1] and list[count][2] == list[count-1][2] :\r\n a = list.pop()\r\n count = count - 1\r\n count = count + 1\r\n count = 0\r\n\r\n #(0,0)에서 부터의 거리 오름차순 정리\r\n if not list:\r\n 
print(\"empty\")\r\n else:\r\n list.sort()\r\n print(list)\r\n '''\r\n for i in range(len(list)):\r\n if count == 0:\r\n print(\"list 내용 한개\")\r\n else:\r\n #오름차순 정리된 점 거리 계산\r\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)\r\n print(count)\r\n print(list[count][1],list[count][2])\r\n print(list[count-1][1],list[count-1][2])\r\n print(\"거리 \",distance1)\r\n count = count + 1\r\n count = 0\r\n '''\r\n cv2.imshow('mask2', frame1)\r\n\r\n\r\n print(' 장면 전환')\r\n cv2.imshow('mask7', mask)\r\n\r\n\r\n\r\n k = cv2.waitKey(300) & 0xFF\r\n if k == 27:\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import itertools as itt
from random import random
from sys import float_info
DIGITS = 3
ACCURACY = 0.001
UP_MAX = 30
class AngleInfo(object):
def __init__(self, information):
# 0 <= spin <= 360
# 0 <= up <= UP_MAX
# -1 <= sin, cos <= 1
if len(information) == 2:
# initialize with angles
spin = round(information[0] % 360, DIGITS)
up = round(information[1], DIGITS)
#print "\tangle - spin:%f, up:%f" % (spin, up)
if spin < 0 or 360 < spin or up < 0 or UP_MAX < up:
# invalid angles
up = None
spin = None
elif len(information) == 3:
# initialized with trigon. function
sin_s = information[0]
cos_s = information[1]
sin_u = information[2]
#print "\ttrigo - ss:%f, cs:%f, su:%f" % (sin_s, cos_s, sin_u)
if reduce(
lambda acc, item:
acc & (-1 <= item and item <= 1),
[sin_s, cos_s, sin_u],
True):
# denormalization
sin_u_org = sin_u * (np.sin(np.radians(UP_MAX)) / 1.0)
up = np.rad2deg(np.arcsin(sin_u_org))
spin = AngleInfo.calculateSpinAngle(sin_s, cos_s)
else:
# invalid trigon. func values
up = None
spin = None
if spin != float_info.max:
self.spin = round(spin, DIGITS)
self.up = round(up, DIGITS)
else:
self.spin = None
self.up = None
def getAngles(self):
return (self.spin, self.up)
def getVectors(self):
if self.spin is None or self.up is None:
return (None, None, None)
else:
return (np.sin(np.radians(self.spin)),
np.cos(np.radians(self.spin)),
np.sin(np.radians(self.up)) / np.sin(np.radians(UP_MAX)))
@staticmethod
def calculateSpinAngle(sin_s, cos_s):
spin_fsin = np.rad2deg(np.arcsin(sin_s))
if spin_fsin < 0:
spin_fsin = spin_fsin + 360
spin_fcos = np.rad2deg(np.arccos(cos_s))
if spin_fcos < 0:
spin_focs = spin_fcos + 360
angles_fsin = set([spin_fsin % 360, (540 - spin_fsin) % 360])
angles_fcos = set([spin_fcos % 360, (360 - spin_fcos) % 360])
angles = list(itt.product(angles_fsin, angles_fcos))
res = None
for i in angles:
if abs(i[0] - i[1]) < ACCURACY:
res = (i[0] + i[1]) / 2.0
return (res if res is not None else float_info.max)
@staticmethod
def getRandomVector():
spin = random() * 360
up = random() * 30
return (np.sin(np.radians(spin)), np.cos(np.radians(spin)), np.sin(np.radians(up)) / np.sin(np.radians(UP_MAX)))
def main():
s = 100
u = 100
for i in range(s):
for j in range(u):
a = AngleInfo(AngleInfo.getRandomVector())
b = AngleInfo(a.getVectors())
print a.getAngles(), b.getAngles(), a.getVectors(), b.getVectors()
if not a.getAngles() == b.getAngles() or not a.getVectors() == b.getVectors():
print "check failed at %d %d" % (i, j)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "97bbbbe6a3a89b9acc22ebdff0b96625d6267178",
"index": 3341,
"step-1": "import numpy as np\nimport itertools as itt\nfrom random import random\nfrom sys import float_info\n\nDIGITS = 3\nACCURACY = 0.001\nUP_MAX = 30\n\nclass AngleInfo(object):\n\n def __init__(self, information):\n # 0 <= spin <= 360\n # 0 <= up <= UP_MAX\n # -1 <= sin, cos <= 1\n if len(information) == 2:\n # initialize with angles\n spin = round(information[0] % 360, DIGITS)\n up = round(information[1], DIGITS)\n #print \"\\tangle - spin:%f, up:%f\" % (spin, up)\n if spin < 0 or 360 < spin or up < 0 or UP_MAX < up:\n # invalid angles\n up = None\n spin = None\n elif len(information) == 3:\n # initialized with trigon. function\n sin_s = information[0]\n cos_s = information[1]\n sin_u = information[2]\n #print \"\\ttrigo - ss:%f, cs:%f, su:%f\" % (sin_s, cos_s, sin_u)\n if reduce(\n lambda acc, item:\n acc & (-1 <= item and item <= 1),\n [sin_s, cos_s, sin_u],\n True):\n # denormalization\n sin_u_org = sin_u * (np.sin(np.radians(UP_MAX)) / 1.0)\n up = np.rad2deg(np.arcsin(sin_u_org))\n spin = AngleInfo.calculateSpinAngle(sin_s, cos_s)\n else:\n # invalid trigon. 
func values\n up = None\n spin = None\n if spin != float_info.max:\n self.spin = round(spin, DIGITS)\n self.up = round(up, DIGITS)\n else:\n self.spin = None\n self.up = None\n\n def getAngles(self):\n return (self.spin, self.up)\n\n def getVectors(self):\n if self.spin is None or self.up is None:\n return (None, None, None)\n else:\n return (np.sin(np.radians(self.spin)),\n np.cos(np.radians(self.spin)),\n np.sin(np.radians(self.up)) / np.sin(np.radians(UP_MAX)))\n\n @staticmethod\n def calculateSpinAngle(sin_s, cos_s):\n\n spin_fsin = np.rad2deg(np.arcsin(sin_s))\n if spin_fsin < 0:\n spin_fsin = spin_fsin + 360\n\n spin_fcos = np.rad2deg(np.arccos(cos_s))\n if spin_fcos < 0:\n spin_focs = spin_fcos + 360\n \n angles_fsin = set([spin_fsin % 360, (540 - spin_fsin) % 360])\n angles_fcos = set([spin_fcos % 360, (360 - spin_fcos) % 360])\n angles = list(itt.product(angles_fsin, angles_fcos))\n res = None\n for i in angles:\n if abs(i[0] - i[1]) < ACCURACY:\n res = (i[0] + i[1]) / 2.0\n return (res if res is not None else float_info.max)\n\n @staticmethod\n def getRandomVector():\n spin = random() * 360\n up = random() * 30\n return (np.sin(np.radians(spin)), np.cos(np.radians(spin)), np.sin(np.radians(up)) / np.sin(np.radians(UP_MAX)))\n\ndef main():\n s = 100\n u = 100\n for i in range(s):\n for j in range(u):\n a = AngleInfo(AngleInfo.getRandomVector())\n b = AngleInfo(a.getVectors())\n print a.getAngles(), b.getAngles(), a.getVectors(), b.getVectors()\n if not a.getAngles() == b.getAngles() or not a.getVectors() == b.getVectors():\n print \"check failed at %d %d\" % (i, j)\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def main():
hostid = hostid_get(token)
itemid_array = itemid_get(hostid, token)
update(itemid_array, token)
def hostid_get(token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'host.get'
payload['params'] = {}
payload['params']['output'] = ['hostid']
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
hostid = data['result'][0]['hostid']
return hostid
def itemid_get(hostid, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'item.get'
payload['params'] = {}
payload['params']['output'] = 'itemid'
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',
'10', '11', '12', '13', '14', '15', '16', '19', '20', '21')
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
itemid_array = []
for itemid in data['result']:
itemid_array.append(str(itemid['itemid']))
return itemid_array
def update(itemid_array, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'task.create'
payload['params'] = []
for itemid in itemid_array:
request = {}
request['type'] = '6'
request['request'] = {}
request['request']['itemid'] = itemid
payload['params'].append(request)
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
json_string = json.dumps(data)
print(json_string)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
hostid = hostid_get(token)
itemid_array = itemid_get(hostid, token)
update(itemid_array, token)
def hostid_get(token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'host.get'
payload['params'] = {}
payload['params']['output'] = ['hostid']
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
hostid = data['result'][0]['hostid']
return hostid
def itemid_get(hostid, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'item.get'
payload['params'] = {}
payload['params']['output'] = 'itemid'
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',
'10', '11', '12', '13', '14', '15', '16', '19', '20', '21')
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
itemid_array = []
for itemid in data['result']:
itemid_array.append(str(itemid['itemid']))
return itemid_array
def update(itemid_array, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'task.create'
payload['params'] = []
for itemid in itemid_array:
request = {}
request['type'] = '6'
request['request'] = {}
request['request']['itemid'] = itemid
payload['params'].append(request)
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
json_string = json.dumps(data)
print(json_string)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'http://<URL>/zabbix/api_jsonrpc.php?'
token = '<TOKEN>'
headers = {'Content-Type': 'application/json'}
hostname = sys.argv[1]
def main():
hostid = hostid_get(token)
itemid_array = itemid_get(hostid, token)
update(itemid_array, token)
def hostid_get(token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'host.get'
payload['params'] = {}
payload['params']['output'] = ['hostid']
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
hostid = data['result'][0]['hostid']
return hostid
def itemid_get(hostid, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'item.get'
payload['params'] = {}
payload['params']['output'] = 'itemid'
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',
'10', '11', '12', '13', '14', '15', '16', '19', '20', '21')
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
itemid_array = []
for itemid in data['result']:
itemid_array.append(str(itemid['itemid']))
return itemid_array
def update(itemid_array, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'task.create'
payload['params'] = []
for itemid in itemid_array:
request = {}
request['type'] = '6'
request['request'] = {}
request['request']['itemid'] = itemid
payload['params'].append(request)
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
json_string = json.dumps(data)
print(json_string)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import requests
import json
import sys
url = 'http://<URL>/zabbix/api_jsonrpc.php?'
token = '<TOKEN>'
headers = {'Content-Type': 'application/json'}
hostname = sys.argv[1]
def main():
hostid = hostid_get(token)
itemid_array = itemid_get(hostid, token)
update(itemid_array, token)
def hostid_get(token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'host.get'
payload['params'] = {}
payload['params']['output'] = ['hostid']
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
hostid = data['result'][0]['hostid']
return hostid
def itemid_get(hostid, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'item.get'
payload['params'] = {}
payload['params']['output'] = 'itemid'
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',
'10', '11', '12', '13', '14', '15', '16', '19', '20', '21')
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
itemid_array = []
for itemid in data['result']:
itemid_array.append(str(itemid['itemid']))
return itemid_array
def update(itemid_array, token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'task.create'
payload['params'] = []
for itemid in itemid_array:
request = {}
request['type'] = '6'
request['request'] = {}
request['request']['itemid'] = itemid
payload['params'].append(request)
payload['auth'] = token
payload['id'] = 1
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
json_string = json.dumps(data)
print(json_string)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 Opensource ICT Solutions B.V.
# https://oicts.com
#
#version: 1.0.0
#date: 06-11-2021
import requests
import json
import sys
url = 'http://<URL>/zabbix/api_jsonrpc.php?'
token = '<TOKEN>'
headers = {'Content-Type': 'application/json'}
hostname = sys.argv[1]
def main():
hostid = hostid_get(token)
itemid_array = itemid_get(hostid,token)
update(itemid_array,token)
def hostid_get(token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'host.get'
payload['params'] = {}
payload['params']['output'] = ['hostid']
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['auth'] = token
payload['id'] = 1
#Doing the request
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
hostid = data["result"][0]["hostid"]
return hostid
def itemid_get(hostid,token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'item.get'
payload['params'] = {}
payload['params']['output'] = 'itemid'
payload['params']['filter'] = {}
payload['params']['filter']['host'] = hostname
payload['params']['filter']['type'] = "0", "1", "3", "5", "8", "9", "10", "11", "12", "13", "14", "15", "16", "19", "20", "21"
payload['auth'] = token
payload['id'] = 1
# print(json.dumps(payload))
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
# print(data)
itemid_array = []
for itemid in data['result']:
itemid_array.append(str(itemid['itemid']))
return itemid_array
def update(itemid_array,token):
payload = {}
payload['jsonrpc'] = '2.0'
payload['method'] = 'task.create'
payload['params'] = []
for itemid in itemid_array:
request = {}
request['type'] = '6'
request['request'] = {}
request['request']['itemid'] = itemid
payload['params'].append(request)
payload['auth'] = token
payload['id'] = 1
#print("payload = " + json.dumps(payload))
request = requests.post(url, data=json.dumps(payload), headers=headers)
data = request.json()
json_string = json.dumps(data)
print(json_string)
if __name__ == '__main__':
# Call to main
main()
|
flexible
|
{
"blob_id": "18d7c486b9070a1c607ba2ba5876309246013182",
"index": 4651,
"step-1": "<mask token>\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\nheaders = {'Content-Type': 'application/json'}\nhostname = sys.argv[1]\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import requests\nimport json\nimport sys\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\nheaders = {'Content-Type': 'application/json'}\nhostname = sys.argv[1]\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2021 Opensource ICT Solutions B.V.\n# https://oicts.com\n#\n#version: 1.0.0\n#date: 06-11-2021\n\n\nimport requests\nimport json\nimport sys\n\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\n\nheaders = {'Content-Type': 'application/json'}\n\nhostname = sys.argv[1]\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid,token)\n update(itemid_array,token)\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n\n\n #Doing the request\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n\n hostid = data[\"result\"][0][\"hostid\"]\n return hostid\n\ndef itemid_get(hostid,token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = \"0\", \"1\", \"3\", \"5\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"19\", \"20\", \"21\"\n payload['auth'] = token\n payload['id'] = 1\n\n# print(json.dumps(payload))\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n\n# print(data)\n\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\ndef update(itemid_array,token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n 
payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n\n #print(\"payload = \" + json.dumps(payload))\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n\n print(json_string)\n\nif __name__ == '__main__':\n # Call to main\n main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class QuitButton(QtGui.QWidget):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuitButton(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('quitButton')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(100, 100, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.
SLOT('quit()'))
<|reserved_special_token_0|>
qb.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QuitButton(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('quitButton')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(100, 100, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.
SLOT('quit()'))
app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
from PyQt4 import QtGui, QtCore
class QuitButton(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('quitButton')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(100, 100, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.
SLOT('quit()'))
app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
'''
Created on 2016��4��8��
@author: liping
'''
import sys
from PyQt4 import QtGui,QtCore
class QuitButton(QtGui.QWidget):
def __init__(self,parent = None):
QtGui.QWidget.__init__(self,parent)
self.setGeometry(300,300,250,150)
self.setWindowTitle('quitButton')
quit = QtGui.QPushButton('Close',self)
quit.setGeometry(100,100,60,35)
self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))
app = QtGui.QApplication(sys.argv)
qb = QuitButton()
qb.show()
sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "5a3431b79b8f42b3042bb27d787d0d92891a7415",
"index": 3947,
"step-1": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\n<mask token>\nqb.show()\nsys.exit(app.exec_())\n",
"step-3": "<mask token>\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n",
"step-4": "<mask token>\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\n\nclass QuitButton(QtGui.QWidget):\n\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setGeometry(300, 300, 250, 150)\n self.setWindowTitle('quitButton')\n quit = QtGui.QPushButton('Close', self)\n quit.setGeometry(100, 100, 60, 35)\n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp, QtCore.\n SLOT('quit()'))\n\n\napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())\n",
"step-5": "# -*- coding:utf-8 -*-\n'''\nCreated on 2016��4��8��\n\n@author: liping\n'''\n\nimport sys\nfrom PyQt4 import QtGui,QtCore\n\nclass QuitButton(QtGui.QWidget):\n def __init__(self,parent = None):\n QtGui.QWidget.__init__(self,parent)\n \n self.setGeometry(300,300,250,150)\n self.setWindowTitle('quitButton')\n \n quit = QtGui.QPushButton('Close',self)\n quit.setGeometry(100,100,60,35)\n \n self.connect(quit, QtCore.SIGNAL('clicked()'), QtGui.qApp,QtCore.SLOT('quit()'))\n \napp = QtGui.QApplication(sys.argv)\nqb = QuitButton()\nqb.show()\nsys.exit(app.exec_())",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from datetime import datetime
import pytz
from pytz import timezone
##PDXtime = datetime.now()
##print(PDXtime.hour)
##
##NYCtime = PDXtime.hour + 3
##print(NYCtime)
##
##Londontime = PDXtime.hour + 8
##print(Londontime)
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H')) #just the hour in 24 hr format
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
|
normal
|
{
"blob_id": "d8cfd9de95e1f47fc41a5389f5137b4af90dc0f1",
"index": 3949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\n<mask token>\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n<mask token>\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-3": "<mask token>\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-4": "from datetime import datetime\nimport pytz\nfrom pytz import timezone\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-5": "from datetime import datetime\nimport pytz\nfrom pytz import timezone \n\n\n\n##PDXtime = datetime.now()\n##print(PDXtime.hour)\n##\n##NYCtime = PDXtime.hour + 3\n##print(NYCtime)\n##\n##Londontime = PDXtime.hour + 8\n##print(Londontime)\n\n\n\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H')) #just the hour in 24 hr format\n\n\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
include('f469-disco/manifest_f469.py')
freeze('src')
|
flexible
|
{
"blob_id": "3b29912788fa4cc76f34f52da7728e934ee96637",
"index": 7117,
"step-1": "<mask token>\n",
"step-2": "include('f469-disco/manifest_f469.py')\nfreeze('src')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#Classe do controlador do servidor SEEEEEEERVIDOOOOOOOOOOR
from usuarioModel import *
class ControllerSC:
'''
O controlador define 2 ações:
- adicionar_pessoa: para adicionar novas pessoas no banco de
dados.
- listar_pessoas: retornar a lista das pessoas
Note que as 2 ações supracitadas utilizam a classe do Modelo para
consultar/atualizar o banco de dados
'''
def __init__(self):
pass
@staticmethod
def entrarSC(login, senha):
resultado = Usuario.entrar(login, senha)
return resultado
@staticmethod
def cadastrarSC(usuario):
Usuario.adicionar(usuario)
@staticmethod
def criarPlaylist(dicioPlaylist):
musicas = Playlist.criarPlaylist(dicioPlaylist)
minhasMusicas = json.dumps(musicas.encode())
return minhasMusicas
|
normal
|
{
"blob_id": "39eecf1c7ec19f7c75721caa092c08569f53d3e5",
"index": 9449,
"step-1": "<mask token>\n\n\nclass ControllerSC:\n <mask token>\n <mask token>\n\n @staticmethod\n def entrarSC(login, senha):\n resultado = Usuario.entrar(login, senha)\n return resultado\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ControllerSC:\n <mask token>\n\n def __init__(self):\n pass\n\n @staticmethod\n def entrarSC(login, senha):\n resultado = Usuario.entrar(login, senha)\n return resultado\n\n @staticmethod\n def cadastrarSC(usuario):\n Usuario.adicionar(usuario)\n\n @staticmethod\n def criarPlaylist(dicioPlaylist):\n musicas = Playlist.criarPlaylist(dicioPlaylist)\n minhasMusicas = json.dumps(musicas.encode())\n return minhasMusicas\n",
"step-3": "<mask token>\n\n\nclass ControllerSC:\n \"\"\"\n O controlador define 2 ações:\n - adicionar_pessoa: para adicionar novas pessoas no banco de\n dados. \n - listar_pessoas: retornar a lista das pessoas\n\n Note que as 2 ações supracitadas utilizam a classe do Modelo para\n consultar/atualizar o banco de dados\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def entrarSC(login, senha):\n resultado = Usuario.entrar(login, senha)\n return resultado\n\n @staticmethod\n def cadastrarSC(usuario):\n Usuario.adicionar(usuario)\n\n @staticmethod\n def criarPlaylist(dicioPlaylist):\n musicas = Playlist.criarPlaylist(dicioPlaylist)\n minhasMusicas = json.dumps(musicas.encode())\n return minhasMusicas\n",
"step-4": "from usuarioModel import *\n\n\nclass ControllerSC:\n \"\"\"\n O controlador define 2 ações:\n - adicionar_pessoa: para adicionar novas pessoas no banco de\n dados. \n - listar_pessoas: retornar a lista das pessoas\n\n Note que as 2 ações supracitadas utilizam a classe do Modelo para\n consultar/atualizar o banco de dados\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def entrarSC(login, senha):\n resultado = Usuario.entrar(login, senha)\n return resultado\n\n @staticmethod\n def cadastrarSC(usuario):\n Usuario.adicionar(usuario)\n\n @staticmethod\n def criarPlaylist(dicioPlaylist):\n musicas = Playlist.criarPlaylist(dicioPlaylist)\n minhasMusicas = json.dumps(musicas.encode())\n return minhasMusicas\n",
"step-5": "#Classe do controlador do servidor SEEEEEEERVIDOOOOOOOOOOR\n\nfrom usuarioModel import *\n\n\nclass ControllerSC:\n '''\n O controlador define 2 ações:\n - adicionar_pessoa: para adicionar novas pessoas no banco de\n dados. \n - listar_pessoas: retornar a lista das pessoas\n\n Note que as 2 ações supracitadas utilizam a classe do Modelo para\n consultar/atualizar o banco de dados\n '''\n\n def __init__(self):\n pass\n \n @staticmethod\n def entrarSC(login, senha):\n resultado = Usuario.entrar(login, senha)\n return resultado\n\n @staticmethod\n def cadastrarSC(usuario):\n Usuario.adicionar(usuario)\n\n @staticmethod\n def criarPlaylist(dicioPlaylist):\n \n musicas = Playlist.criarPlaylist(dicioPlaylist)\n minhasMusicas = json.dumps(musicas.encode())\n return minhasMusicas\n ",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
import mlcd,pygame,time,random
PLAYER_CHAR=">"
OBSTACLE_CHAR="|"
screenbuff=[[" "," "," "," "," "," "," "," "," "," "," "," "],
[" "," "," "," "," "," "," "," "," "," "," "," "]]
player={"position":0,"line":0,"score":000}
game={"speed":4.05,"level":2.5,"obstacle":0}
keys={"space":False,"quit":False,"next":False}
def keypress(): #get keypresses
global keys
keys["space"]=keys["quit"]=keys["next"]=False #reset all keys
#check keys
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
keys["space"] = True
elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
keys["quit"] = True
done=False
#initialize mlcd as 16x2 character lcd
mlcd.init(16,2)
lasttime=time.time()
curtime=0.0
while not done:
curtime=time.time()
if (curtime-lasttime>1/game["speed"]):
lasttime=curtime
#increment score and count obstacle
#up the level and increase the speed
if screenbuff[0][player["position"]]==OBSTACLE_CHAR or screenbuff[1][player["position"]]==OBSTACLE_CHAR:
player["score"]+=1
game["obstacle"]-=1
game["level"]+=0.5
game["speed"]+=0.05
#if((game["level"]+2)%game["posmovthres"]==0 and player["position"]<12 and screenbuff[player["line"]][player["position"]+1]!=OBSTACLE_CHAR and screenbuff[player["line"]][player["position"]+2]!=OBSTACLE_CHAR):
# player["position"]+=1
#move everything one place to the left
for lindex,lin in enumerate(screenbuff,start=0):
for index,pos in enumerate(lin, start=0):
if index>0:
screenbuff[lindex][index-1]=pos
#add new chars at end of buff , obstacles if there is a gap
screenbuff[0][-1]=" "
screenbuff[1][-1]=" "
if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[0][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==1):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
elif screenbuff[1][-2] != OBSTACLE_CHAR:
if game["obstacle"]<int(game["level"]) and random.choice([0,1]):
lin_temp=random.choice([0,1])
if(lin_temp==0):
screenbuff[lin_temp][-1]=OBSTACLE_CHAR
game["obstacle"]+=1
#check for collision
if screenbuff[player["line"]][player["position"]]==OBSTACLE_CHAR:
done=True #player lost
#add player to the buffer
screenbuff[player["line"]][player["position"]]=PLAYER_CHAR
#ready the lines for drawing on lcd
lines=[''.join(screenbuff[0]) + "|scr",
''.join(screenbuff[1]) + "|"+str(player["score"])]
mlcd.draw(lines)
#remove player from buffer
screenbuff[player["line"]][player["position"]]=" "
#get keypresses
keypress()
#modify player line (move the player) if space is pressed
if keys["space"]:
if player["line"]==0:
player["line"]=1
else:
player["line"]=0
#quit
if keys["quit"]:
print("game quit")
done=True
pygame.quit()
|
normal
|
{
"blob_id": "aeaab602cbb9fa73992eb5259e8603ecb11ba333",
"index": 4863,
"step-1": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\n<mask token>\nmlcd.init(16, 2)\n<mask token>\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 
0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-3": "<mask token>\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n if 
screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-4": "import mlcd, pygame, time, random\nPLAYER_CHAR = '>'\nOBSTACLE_CHAR = '|'\nscreenbuff = [[' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '],\n [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']]\nplayer = {'position': 0, 'line': 0, 'score': 0}\ngame = {'speed': 4.05, 'level': 2.5, 'obstacle': 0}\nkeys = {'space': False, 'quit': False, 'next': False}\n\n\ndef keypress():\n global keys\n keys['space'] = keys['quit'] = keys['next'] = False\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys['space'] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys['quit'] = True\n\n\ndone = False\nmlcd.init(16, 2)\nlasttime = time.time()\ncurtime = 0.0\nwhile not done:\n curtime = time.time()\n if curtime - lasttime > 1 / game['speed']:\n lasttime = curtime\n if screenbuff[0][player['position']] == OBSTACLE_CHAR or screenbuff[1][\n player['position']] == OBSTACLE_CHAR:\n player['score'] += 1\n game['obstacle'] -= 1\n game['level'] += 0.5\n game['speed'] += 0.05\n for lindex, lin in enumerate(screenbuff, start=0):\n for index, pos in enumerate(lin, start=0):\n if index > 0:\n screenbuff[lindex][index - 1] = pos\n screenbuff[0][-1] = ' '\n screenbuff[1][-1] = ' '\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2\n ] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 1:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n game['obstacle'] += 1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game['obstacle'] < int(game['level']) and random.choice([0, 1]):\n lin_temp = random.choice([0, 1])\n if lin_temp == 0:\n screenbuff[lin_temp][-1] = OBSTACLE_CHAR\n 
game['obstacle'] += 1\n if screenbuff[player['line']][player['position']] == OBSTACLE_CHAR:\n done = True\n screenbuff[player['line']][player['position']] = PLAYER_CHAR\n lines = [''.join(screenbuff[0]) + '|scr', ''.join(screenbuff[1]) + '|' +\n str(player['score'])]\n mlcd.draw(lines)\n screenbuff[player['line']][player['position']] = ' '\n keypress()\n if keys['space']:\n if player['line'] == 0:\n player['line'] = 1\n else:\n player['line'] = 0\n if keys['quit']:\n print('game quit')\n done = True\npygame.quit()\n",
"step-5": "import mlcd,pygame,time,random\n\nPLAYER_CHAR=\">\"\nOBSTACLE_CHAR=\"|\"\n\nscreenbuff=[[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"],\n [\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]]\n\nplayer={\"position\":0,\"line\":0,\"score\":000}\ngame={\"speed\":4.05,\"level\":2.5,\"obstacle\":0} \nkeys={\"space\":False,\"quit\":False,\"next\":False}\n\ndef keypress(): #get keypresses\n global keys\n keys[\"space\"]=keys[\"quit\"]=keys[\"next\"]=False #reset all keys\n #check keys\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n keys[\"space\"] = True\n elif event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n keys[\"quit\"] = True\n\n \n \n\ndone=False\n#initialize mlcd as 16x2 character lcd\nmlcd.init(16,2)\nlasttime=time.time()\ncurtime=0.0\n\nwhile not done:\n curtime=time.time()\n if (curtime-lasttime>1/game[\"speed\"]):\n lasttime=curtime\n\n\n #increment score and count obstacle\n #up the level and increase the speed\n if screenbuff[0][player[\"position\"]]==OBSTACLE_CHAR or screenbuff[1][player[\"position\"]]==OBSTACLE_CHAR:\n player[\"score\"]+=1\n game[\"obstacle\"]-=1\n game[\"level\"]+=0.5\n game[\"speed\"]+=0.05\n #if((game[\"level\"]+2)%game[\"posmovthres\"]==0 and player[\"position\"]<12 and screenbuff[player[\"line\"]][player[\"position\"]+1]!=OBSTACLE_CHAR and screenbuff[player[\"line\"]][player[\"position\"]+2]!=OBSTACLE_CHAR):\n # player[\"position\"]+=1\n\n #move everything one place to the left\n for lindex,lin in enumerate(screenbuff,start=0):\n for index,pos in enumerate(lin, start=0):\n if index>0:\n screenbuff[lindex][index-1]=pos\n \n #add new chars at end of buff , obstacles if there is a gap\n screenbuff[0][-1]=\" \"\n screenbuff[1][-1]=\" \"\n if screenbuff[0][-2] != OBSTACLE_CHAR and screenbuff[1][-2]!=OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n 
lin_temp=random.choice([0,1])\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[0][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==1):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n elif screenbuff[1][-2] != OBSTACLE_CHAR:\n if game[\"obstacle\"]<int(game[\"level\"]) and random.choice([0,1]):\n lin_temp=random.choice([0,1])\n if(lin_temp==0):\n screenbuff[lin_temp][-1]=OBSTACLE_CHAR\n game[\"obstacle\"]+=1\n \n\n #check for collision\n if screenbuff[player[\"line\"]][player[\"position\"]]==OBSTACLE_CHAR:\n done=True #player lost\n #add player to the buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=PLAYER_CHAR\n #ready the lines for drawing on lcd\n lines=[''.join(screenbuff[0]) + \"|scr\",\n ''.join(screenbuff[1]) + \"|\"+str(player[\"score\"])]\n mlcd.draw(lines)\n \n #remove player from buffer\n screenbuff[player[\"line\"]][player[\"position\"]]=\" \"\n #get keypresses\n keypress()\n #modify player line (move the player) if space is pressed\n if keys[\"space\"]:\n if player[\"line\"]==0:\n player[\"line\"]=1\n else:\n player[\"line\"]=0\n #quit\n if keys[\"quit\"]:\n print(\"game quit\")\n done=True\npygame.quit()\n \n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass_with_properties
class ExternalMap:
external_id: str
verified_using: List[IntegrityMethod] = field(default_factory=list)
location_hint: Optional[str] = None
defining_document: Optional[str] = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass_with_properties
class ExternalMap:
external_id: str
verified_using: List[IntegrityMethod] = field(default_factory=list)
location_hint: Optional[str] = None
defining_document: Optional[str] = None
def __init__(self, external_id: str, verified_using: List[
IntegrityMethod]=None, location_hint: Optional[str]=None,
defining_document: Optional[str]=None):
verified_using = [] if verified_using is None else verified_using
check_types_and_set_values(self, locals())
<|reserved_special_token_1|>
from dataclasses import field
from beartype.typing import List, Optional
from spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties
from spdx_tools.common.typing.type_checks import check_types_and_set_values
from spdx_tools.spdx3.model import IntegrityMethod
@dataclass_with_properties
class ExternalMap:
external_id: str
verified_using: List[IntegrityMethod] = field(default_factory=list)
location_hint: Optional[str] = None
defining_document: Optional[str] = None
def __init__(self, external_id: str, verified_using: List[
IntegrityMethod]=None, location_hint: Optional[str]=None,
defining_document: Optional[str]=None):
verified_using = [] if verified_using is None else verified_using
check_types_and_set_values(self, locals())
<|reserved_special_token_1|>
# SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from dataclasses import field
from beartype.typing import List, Optional
from spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties
from spdx_tools.common.typing.type_checks import check_types_and_set_values
from spdx_tools.spdx3.model import IntegrityMethod
@dataclass_with_properties
class ExternalMap:
external_id: str # anyURI
verified_using: List[IntegrityMethod] = field(default_factory=list)
location_hint: Optional[str] = None # anyURI
defining_document: Optional[str] = None
def __init__(
self,
external_id: str,
verified_using: List[IntegrityMethod] = None,
location_hint: Optional[str] = None,
defining_document: Optional[str] = None,
):
verified_using = [] if verified_using is None else verified_using
check_types_and_set_values(self, locals())
|
flexible
|
{
"blob_id": "1c085ea8f9b21ea7bef94ad4ecbb1771a57f697a",
"index": 2208,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n <mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-4": "from dataclasses import field\nfrom beartype.typing import List, Optional\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None\n defining_document: Optional[str] = None\n\n def __init__(self, external_id: str, verified_using: List[\n IntegrityMethod]=None, location_hint: Optional[str]=None,\n defining_document: Optional[str]=None):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-5": "# SPDX-FileCopyrightText: 2023 spdx contributors\n#\n# SPDX-License-Identifier: Apache-2.0\nfrom dataclasses import field\n\nfrom beartype.typing import List, Optional\n\nfrom spdx_tools.common.typing.dataclass_with_properties import dataclass_with_properties\nfrom spdx_tools.common.typing.type_checks import check_types_and_set_values\nfrom spdx_tools.spdx3.model import IntegrityMethod\n\n\n@dataclass_with_properties\nclass ExternalMap:\n external_id: str # anyURI\n verified_using: List[IntegrityMethod] = field(default_factory=list)\n location_hint: Optional[str] = None # anyURI\n defining_document: Optional[str] = None\n\n def __init__(\n self,\n external_id: str,\n verified_using: List[IntegrityMethod] = None,\n location_hint: Optional[str] = None,\n defining_document: Optional[str] = None,\n ):\n verified_using = [] if verified_using is None else verified_using\n check_types_and_set_values(self, locals())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.