code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import os
import random
# Directory containing the raw DBLP edge list and node-label files.
readpath = './DBLP/'
# Directory the filtered outputs are written to (same as readpath, so the
# originals are overwritten in place).
writepath = './DBLP/'
# Edge list: one "src dst timestamp" triple per line.
dataname = 'dblp.txt'
# Node-to-label mapping: one "node label" pair per line.
labelname = 'node2label.txt'
# Output file for the held-out (test) edges.
testsetname = writepath + 'dblp_testset.txt'
def run(save_rate):
    """Split the DBLP temporal edge list into train and test sets.

    The chronologically earliest ``save_rate`` fraction of edges is kept as
    the training set (the cut is pushed forward so edges sharing the cut
    timestamp are never split across the boundary).  Node ids occurring in
    the training set are renamed to a dense 0..N-1 range.  Later edges whose
    endpoints both occur in training become the test set, and node labels
    are filtered and renamed to match.

    :param save_rate: fraction (0 < save_rate <= 1) of the earliest edges
        to keep for training.
    """
    rdataname = readpath + dataname
    rlabelname = readpath + labelname
    wdataname = writepath + dataname
    wlabelname = writepath + labelname

    ordata = []
    all_user = set()
    all_time = set()
    rename = dict()
    newdatasize = 0

    with open(rdataname, 'r') as r:
        # Each line is "src dst timestamp"; timestamps become floats so the
        # edges can be sorted chronologically.
        for line in r:
            x = line.strip('\n').split()
            x[2] = float(x[2])
            ordata.append(x)
        ordata = sorted(ordata, key=lambda e: e[2])

        datasize = len(ordata)
        savesize = int(datasize * save_rate)
        print("原始数据中共有 %d 条\n预计保留 %d 条" % (datasize, savesize))

        # Extend the cut forward so that edges carrying the same timestamp
        # all land on the training side.
        while savesize != datasize and ordata[savesize - 1][2] == ordata[savesize][2]:
            savesize = savesize + 1
        print("实际保留 %d 条" % savesize)
        print("实际切割比例" + str(savesize / datasize))

        # Gather the users and the distinct timestamps of the kept edges.
        for i in range(savesize):
            x = ordata[i]
            all_user.update({str(x[0]), str(x[1])})
            all_time.add(x[2])
        print("实际保留数据中,用户数量 %d 个,不同时间节点 %d 个" % (len(all_user), len(all_time)))
        newdatasize = savesize

        # Dense renaming: sorted original ids -> 0..N-1.
        list_all_user = sorted(int(u) for u in all_user)
        for step, uid in enumerate(list_all_user):
            rename[uid] = step

        if not os.path.exists(writepath):
            os.makedirs(writepath)

        # Training edges with renamed endpoints.
        with open(wdataname, 'w') as w:
            for i in range(newdatasize):
                x = ordata[i]
                a = str(rename[int(x[0])])
                b = str(rename[int(x[1])])
                w.write(a + ' ' + b + ' ' + str(x[2]) + '\n')

        # Test edges: keep only those whose endpoints were seen in training.
        with open(testsetname, 'w') as w:
            index = 0
            for i in range(newdatasize, datasize):
                x = ordata[i]
                if int(x[0]) not in rename or int(x[1]) not in rename:
                    continue
                a = str(rename[int(x[0])])
                b = str(rename[int(x[1])])
                w.write(a + ' ' + b + ' ' + str(x[2]) + '\n')
                index = index + 1
            # BUG FIX: range(newdatasize, datasize) holds exactly
            # datasize - newdatasize candidate edges; the original printed
            # one too many (datasize - newdatasize + 1).
            print('预计测试集剩余数量 %d' % (datasize - newdatasize))
            print('测试集剩余数量 %d' % index)

        # Labels restricted to training users, with renamed node ids.
        temp = 0
        with open(rlabelname, 'r') as rl:
            with open(wlabelname, 'w') as w:
                for line in rl:
                    x = line.strip('\n').split()
                    if x[0] in all_user:
                        temp = temp + 1
                        a = str(rename[int(x[0])])
                        w.write(a + ' ' + x[1] + '\n')
        print("标签集数量 " + str(temp) + " 个")
if __name__ == '__main__':
    # Keep the chronologically earliest 70% of edges as the training split.
    run(0.7)
|
normal
|
{
"blob_id": "4bd6a7c7fc6a788b2cb010f6513872bd3e0d396c",
"index": 5011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(save_rate):\n rdataname = readpath + dataname\n rlabelname = readpath + labelname\n wdataname = writepath + dataname\n wlabelname = writepath + labelname\n ordata = []\n all_user = set()\n all_time = set()\n rename = dict()\n newdatasize = 0\n with open(rdataname, 'r') as r:\n for line in r:\n x = line.strip('\\n').split()\n x[2] = float(x[2])\n ordata.append(x)\n ordata = sorted(ordata, key=lambda x: x[2])\n datasize = len(ordata)\n savesize = int(datasize * save_rate)\n print('原始数据中共有 %d 条\\n预计保留 %d 条' % (datasize, savesize))\n while savesize != datasize and ordata[savesize - 1][2] == ordata[\n savesize][2]:\n savesize = savesize + 1\n print('实际保留 %d 条' % savesize)\n print('实际切割比例' + str(savesize / datasize))\n for i in range(savesize):\n x = ordata[i]\n a = str(x[0])\n b = str(x[1])\n all_user.update({a, b})\n all_time.add(x[2])\n print('实际保留数据中,用户数量 %d 个,不同时间节点 %d 个' % (len(all_user), len(all_time)))\n newdatasize = savesize\n list_all_user = list(all_user)\n list_all_user = [int(i) for i in list_all_user]\n list_all_user.sort()\n step = 0\n for i in list_all_user:\n rename[i] = step\n step = step + 1\n flag = os.path.exists(writepath)\n if not flag:\n os.makedirs(writepath)\n with open(wdataname, 'w') as w:\n for i in range(newdatasize):\n x = ordata[i]\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n with open(testsetname, 'w') as w:\n index = 0\n for i in range(newdatasize, datasize):\n x = ordata[i]\n if int(x[0]) not in rename or int(x[1]) not in rename:\n continue\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n index = index + 1\n print('预计测试集剩余数量 %d' % (datasize - newdatasize + 1))\n print('测试集剩余数量 %d' % index)\n temp = 0\n with open(rlabelname, 'r') as r:\n with open(wlabelname, 'w') as w:\n for line in r:\n x = line.strip('\\n').split()\n if x[0] in all_user:\n temp = temp + 1\n a = 
str(rename[int(x[0])])\n w.write(a + ' ' + x[1] + '\\n')\n print('标签集数量 ' + str(temp) + ' 个')\n\n\nif __name__ == '__main__':\n run(0.7)\n",
"step-3": "<mask token>\nreadpath = './DBLP/'\nwritepath = './DBLP/'\ndataname = 'dblp.txt'\nlabelname = 'node2label.txt'\ntestsetname = writepath + 'dblp_testset.txt'\n\n\ndef run(save_rate):\n rdataname = readpath + dataname\n rlabelname = readpath + labelname\n wdataname = writepath + dataname\n wlabelname = writepath + labelname\n ordata = []\n all_user = set()\n all_time = set()\n rename = dict()\n newdatasize = 0\n with open(rdataname, 'r') as r:\n for line in r:\n x = line.strip('\\n').split()\n x[2] = float(x[2])\n ordata.append(x)\n ordata = sorted(ordata, key=lambda x: x[2])\n datasize = len(ordata)\n savesize = int(datasize * save_rate)\n print('原始数据中共有 %d 条\\n预计保留 %d 条' % (datasize, savesize))\n while savesize != datasize and ordata[savesize - 1][2] == ordata[\n savesize][2]:\n savesize = savesize + 1\n print('实际保留 %d 条' % savesize)\n print('实际切割比例' + str(savesize / datasize))\n for i in range(savesize):\n x = ordata[i]\n a = str(x[0])\n b = str(x[1])\n all_user.update({a, b})\n all_time.add(x[2])\n print('实际保留数据中,用户数量 %d 个,不同时间节点 %d 个' % (len(all_user), len(all_time)))\n newdatasize = savesize\n list_all_user = list(all_user)\n list_all_user = [int(i) for i in list_all_user]\n list_all_user.sort()\n step = 0\n for i in list_all_user:\n rename[i] = step\n step = step + 1\n flag = os.path.exists(writepath)\n if not flag:\n os.makedirs(writepath)\n with open(wdataname, 'w') as w:\n for i in range(newdatasize):\n x = ordata[i]\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n with open(testsetname, 'w') as w:\n index = 0\n for i in range(newdatasize, datasize):\n x = ordata[i]\n if int(x[0]) not in rename or int(x[1]) not in rename:\n continue\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n index = index + 1\n print('预计测试集剩余数量 %d' % (datasize - newdatasize + 1))\n print('测试集剩余数量 %d' % index)\n temp = 0\n with open(rlabelname, 'r') as r:\n 
with open(wlabelname, 'w') as w:\n for line in r:\n x = line.strip('\\n').split()\n if x[0] in all_user:\n temp = temp + 1\n a = str(rename[int(x[0])])\n w.write(a + ' ' + x[1] + '\\n')\n print('标签集数量 ' + str(temp) + ' 个')\n\n\nif __name__ == '__main__':\n run(0.7)\n",
"step-4": "import os\nimport random\nreadpath = './DBLP/'\nwritepath = './DBLP/'\ndataname = 'dblp.txt'\nlabelname = 'node2label.txt'\ntestsetname = writepath + 'dblp_testset.txt'\n\n\ndef run(save_rate):\n rdataname = readpath + dataname\n rlabelname = readpath + labelname\n wdataname = writepath + dataname\n wlabelname = writepath + labelname\n ordata = []\n all_user = set()\n all_time = set()\n rename = dict()\n newdatasize = 0\n with open(rdataname, 'r') as r:\n for line in r:\n x = line.strip('\\n').split()\n x[2] = float(x[2])\n ordata.append(x)\n ordata = sorted(ordata, key=lambda x: x[2])\n datasize = len(ordata)\n savesize = int(datasize * save_rate)\n print('原始数据中共有 %d 条\\n预计保留 %d 条' % (datasize, savesize))\n while savesize != datasize and ordata[savesize - 1][2] == ordata[\n savesize][2]:\n savesize = savesize + 1\n print('实际保留 %d 条' % savesize)\n print('实际切割比例' + str(savesize / datasize))\n for i in range(savesize):\n x = ordata[i]\n a = str(x[0])\n b = str(x[1])\n all_user.update({a, b})\n all_time.add(x[2])\n print('实际保留数据中,用户数量 %d 个,不同时间节点 %d 个' % (len(all_user), len(all_time)))\n newdatasize = savesize\n list_all_user = list(all_user)\n list_all_user = [int(i) for i in list_all_user]\n list_all_user.sort()\n step = 0\n for i in list_all_user:\n rename[i] = step\n step = step + 1\n flag = os.path.exists(writepath)\n if not flag:\n os.makedirs(writepath)\n with open(wdataname, 'w') as w:\n for i in range(newdatasize):\n x = ordata[i]\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n with open(testsetname, 'w') as w:\n index = 0\n for i in range(newdatasize, datasize):\n x = ordata[i]\n if int(x[0]) not in rename or int(x[1]) not in rename:\n continue\n a = str(rename[int(x[0])])\n b = str(rename[int(x[1])])\n w.write(a + ' ' + b + ' ' + str(x[2]) + '\\n')\n index = index + 1\n print('预计测试集剩余数量 %d' % (datasize - newdatasize + 1))\n print('测试集剩余数量 %d' % index)\n temp = 0\n with open(rlabelname, 
'r') as r:\n with open(wlabelname, 'w') as w:\n for line in r:\n x = line.strip('\\n').split()\n if x[0] in all_user:\n temp = temp + 1\n a = str(rename[int(x[0])])\n w.write(a + ' ' + x[1] + '\\n')\n print('标签集数量 ' + str(temp) + ' 个')\n\n\nif __name__ == '__main__':\n run(0.7)\n",
"step-5": "import os\nimport random\n\nreadpath = './DBLP/'\nwritepath = './DBLP/'\ndataname = 'dblp.txt'\nlabelname = 'node2label.txt'\ntestsetname = writepath + 'dblp_testset.txt'\n\ndef run(save_rate):\n\trdataname = readpath + dataname\n\trlabelname = readpath + labelname\n\twdataname = writepath + dataname\n\twlabelname = writepath + labelname\n\t\n\tordata = []\n\tall_user = set()\n\tall_time = set()\n\trename = dict()\n\tnewdatasize = 0\n\n\twith open(rdataname, 'r') as r:\n\t\tfor line in r:\n\t\t\tx = line.strip('\\n').split()\n\t\t\tx[2] = float(x[2])\n\t\t\tordata.append(x)\n\t\tordata = sorted(ordata, key = lambda x:x[2])\n\t\t\n\t\tdatasize = len(ordata)\n\t\tsavesize = int(datasize * save_rate)\n\t\tprint(\"原始数据中共有 %d 条\\n预计保留 %d 条\" % (datasize, savesize))\n\n\t\twhile(savesize != datasize and ordata[savesize-1][2] == ordata[savesize][2]):\n\t\t\tsavesize = savesize + 1\n\t\tprint(\"实际保留 %d 条\" % savesize)\n\t\tprint(\"实际切割比例\" + str(savesize/datasize))\n\t\t\n\t\tfor i in range(savesize):\n\t\t\tx = ordata[i]\n\t\t\ta = str(x[0])\n\t\t\tb = str(x[1])\n\t\t\tall_user.update({a,b})\n\t\t\t#print(len(all_user))\n\t\t\tall_time.add(x[2])\n\t\tprint(\"实际保留数据中,用户数量 %d 个,不同时间节点 %d 个\" %(len(all_user), len(all_time)))\n\t\tnewdatasize = savesize\n\t\t\n\n\t\tlist_all_user = list(all_user)\n\t\tlist_all_user = [int(i) for i in list_all_user]\n\t\tlist_all_user.sort()\n\t\tstep = 0\n\t\tfor i in list_all_user:\n\t\t\trename[i] = step\n\t\t\t#print(i, rename[i])\n\t\t\tstep = step + 1\n\t\t\t\n\t\t\n\n\t\tflag = os.path.exists(writepath)\n\t\tif not flag:\n\t\t\tos.makedirs(writepath)\n\n\t\twith open(wdataname, 'w') as w:\n\t\t\tfor i in range(newdatasize):\n\t\t\t\tx = ordata[i]\n\t\t\t\ta = str(rename[int(x[0])])\n\t\t\t\tb = str(rename[int(x[1])])\n\t\t\t\tw.write(a + ' ' + b + ' ' + str(x[2])+'\\n')\n\n\n\t\twith open(testsetname, 'w') as w:\n\t\t\tindex = 0\n\t\t\tfor i in range(newdatasize,datasize):\n\t\t\t\tx = ordata[i]\n\n\t\t\t\tif(int(x[0]) not in 
rename or int(x[1]) not in rename):\n\t\t\t\t\tcontinue\n\t\t\t\ta = str(rename[int(x[0])])\n\t\t\t\tb = str(rename[int(x[1])])\n\t\t\t\tw.write(a + ' ' + b + ' ' + str(x[2])+'\\n')\n\t\t\t\tindex = index+1\n\t\t\tprint('预计测试集剩余数量 %d'%(datasize-newdatasize+1))\n\t\t\tprint('测试集剩余数量 %d'%(index))\n\n\t\ttemp = 0\n\t\twith open(rlabelname, 'r') as r:\n\t\t\twith open(wlabelname, 'w') as w:\n\t\t\t\tfor line in r:\n\t\t\t\t\tx = line.strip('\\n').split()\n\t\t\t\t\tif(x[0] in all_user):\n\t\t\t\t\t\ttemp = temp + 1\n\t\t\t\t\t\ta = str(rename[int(x[0])])\n\t\t\t\t\t\tw.write(a + ' ' + x[1] + '\\n')\n\t\tprint(\"标签集数量 \" + str(temp)+ \" 个\")\n\t\nif __name__ == '__main__':\n\trun(0.7)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# obtain the dataset
import pandas as pd

# NOTE(review): this Vanderbilt URL is known to have moved; if the download
# fails, fetch the titanic dataset manually -- TODO confirm a current mirror.
titanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')
#titanic.info()
print(titanic.head())

# preprocessing: drop identifier columns, keep 'survived' as the target
x = titanic.drop(['row.names', 'name', 'survived'], axis=1)
y = titanic['survived']

# impute missing ages with the column mean; every other missing value gets a
# sentinel category so DictVectorizer can one-hot encode it
x['age'].fillna(x['age'].mean(), inplace=True)
x.fillna('UNKNOWN', inplace=True)

# split
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=33)

# feature extraction: one-hot encode the categorical columns
# BUG FIX: the orient value is 'records' (plural); modern pandas rejects the
# misspelled 'record'.
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
x_train = vec.fit_transform(x_train.to_dict(orient='records'))
x_test = vec.transform(x_test.to_dict(orient='records'))
#print(len(vec.feature_names_))

# train a decision tree on all features and report test accuracy
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier(criterion='entropy')
dtc.fit(x_train, y_train)
#y_predict = dtc.predict(x_test)
print(dtc.score(x_test, y_test))

# keep only the top-20% features by chi-squared score and compare accuracy
from sklearn import feature_selection
fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
x_train_fs = fs.fit_transform(x_train, y_train)
dtc.fit(x_train_fs, y_train)
x_test_fs = fs.transform(x_test)
print(dtc.score(x_test_fs, y_test))
|
normal
|
{
"blob_id": "f1475d651c3b52611657a9767ad62796b55d8711",
"index": 3676,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(titanic.head())\n<mask token>\nx['age'].fillna(x['age'].mean(), inplace=True)\nx.fillna('UNKNOWN', inplace=True)\n<mask token>\ndtc.fit(x_train, y_train)\nprint(dtc.score(x_test, y_test))\n<mask token>\ndtc.fit(x_train_fs, y_train)\n<mask token>\nprint(dtc.score(x_test_fs, y_test))\n",
"step-3": "<mask token>\ntitanic = pd.read_csv(\n 'http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')\nprint(titanic.head())\nx = titanic.drop(['row.names', 'name', 'survived'], axis=1)\ny = titanic['survived']\nx['age'].fillna(x['age'].mean(), inplace=True)\nx.fillna('UNKNOWN', inplace=True)\n<mask token>\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,\n random_state=33)\n<mask token>\nvec = DictVectorizer()\nx_train = vec.fit_transform(x_train.to_dict(orient='record'))\nx_test = vec.transform(x_test.to_dict(orient='record'))\n<mask token>\ndtc = DecisionTreeClassifier(criterion='entropy')\ndtc.fit(x_train, y_train)\nprint(dtc.score(x_test, y_test))\n<mask token>\nfs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)\nx_train_fs = fs.fit_transform(x_train, y_train)\ndtc.fit(x_train_fs, y_train)\nx_test_fs = fs.transform(x_test)\nprint(dtc.score(x_test_fs, y_test))\n",
"step-4": "import pandas as pd\ntitanic = pd.read_csv(\n 'http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')\nprint(titanic.head())\nx = titanic.drop(['row.names', 'name', 'survived'], axis=1)\ny = titanic['survived']\nx['age'].fillna(x['age'].mean(), inplace=True)\nx.fillna('UNKNOWN', inplace=True)\nfrom sklearn.cross_validation import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,\n random_state=33)\nfrom sklearn.feature_extraction import DictVectorizer\nvec = DictVectorizer()\nx_train = vec.fit_transform(x_train.to_dict(orient='record'))\nx_test = vec.transform(x_test.to_dict(orient='record'))\nfrom sklearn.tree import DecisionTreeClassifier\ndtc = DecisionTreeClassifier(criterion='entropy')\ndtc.fit(x_train, y_train)\nprint(dtc.score(x_test, y_test))\nfrom sklearn import feature_selection\nfs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)\nx_train_fs = fs.fit_transform(x_train, y_train)\ndtc.fit(x_train_fs, y_train)\nx_test_fs = fs.transform(x_test)\nprint(dtc.score(x_test_fs, y_test))\n",
"step-5": "# obtain the dataset\r\nimport pandas as pd\r\n\r\ntitanic = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')\r\n#titanic.info()\r\nprint(titanic.head())\r\n\r\n\r\n# preprocessing\r\nx = titanic.drop(['row.names', 'name', 'survived'], axis=1)\r\ny = titanic['survived']\r\n\r\nx['age'].fillna(x['age'].mean(), inplace = True) # add data for age feature\r\nx.fillna('UNKNOWN', inplace=True)\r\n\r\n# split\r\nfrom sklearn.cross_validation import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=33)\r\n\r\n\r\n#feature extraction\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nvec = DictVectorizer()\r\nx_train = vec.fit_transform(x_train.to_dict(orient='record'))\r\nx_test = vec.transform(x_test.to_dict(orient='record'))\r\n#print(len(vec.feature_names_))\r\n\r\n# import decision tree model\r\nfrom sklearn.tree import DecisionTreeClassifier\r\ndtc = DecisionTreeClassifier(criterion='entropy')\r\ndtc.fit(x_train, y_train)\r\n#y_predict = dtc.predict(x_test)\r\nprint(dtc.score(x_test, y_test))\r\n\r\nfrom sklearn import feature_selection\r\nfs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)\r\nx_train_fs = fs.fit_transform(x_train, y_train)\r\ndtc.fit(x_train_fs, y_train)\r\nx_test_fs = fs.transform(x_test)\r\nprint(dtc.score(x_test_fs, y_test))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RefrigeratorRaider:
"""Raid a refrigerator"""
def open(self):
print('Open fridge door.')
def take(self, food):
print('Finding {}...'.format(food))
if food == 'deep fried pizza':
raise RuntimeError('Health warning!')
print('Taking {}'.format(food))
def close(self):
print('Close fridg door.')
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RefrigeratorRaider:
"""Raid a refrigerator"""
def open(self):
print('Open fridge door.')
def take(self, food):
print('Finding {}...'.format(food))
if food == 'deep fried pizza':
raise RuntimeError('Health warning!')
print('Taking {}'.format(food))
def close(self):
print('Close fridg door.')
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
raid('bacon')
raid('deep fried pizza')
<|reserved_special_token_1|>
__author__ = 'Prikly Grayp'
__license__ = 'MIT'
__version__ = '1.0.0'
__email__ = 'priklygrayp@gmail.com'
__status__ = 'Development'
<|reserved_special_token_0|>
class RefrigeratorRaider:
"""Raid a refrigerator"""
def open(self):
print('Open fridge door.')
def take(self, food):
print('Finding {}...'.format(food))
if food == 'deep fried pizza':
raise RuntimeError('Health warning!')
print('Taking {}'.format(food))
def close(self):
print('Close fridg door.')
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
raid('bacon')
raid('deep fried pizza')
<|reserved_special_token_1|>
__author__ = 'Prikly Grayp'
__license__ = 'MIT'
__version__ = '1.0.0'
__email__ = 'priklygrayp@gmail.com'
__status__ = 'Development'
from contextlib import closing
class RefrigeratorRaider:
"""Raid a refrigerator"""
def open(self):
print('Open fridge door.')
def take(self, food):
print('Finding {}...'.format(food))
if food == 'deep fried pizza':
raise RuntimeError('Health warning!')
print('Taking {}'.format(food))
def close(self):
print('Close fridg door.')
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
raid('bacon')
raid('deep fried pizza')
<|reserved_special_token_1|>
# Module metadata.
__author__ = "Prikly Grayp"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "priklygrayp@gmail.com"
__status__ = "Development"
from contextlib import closing
class RefrigeratorRaider:
    """Raid a refrigerator.

    Each method prints a trace line.  ``close()`` makes instances usable with
    :func:`contextlib.closing`, so the door is shut even when ``take()``
    raises.
    """

    def open(self):
        """Announce opening the fridge door."""
        print('Open fridge door.')

    def take(self, food):
        """Locate and take *food*.

        :raises RuntimeError: if *food* is 'deep fried pizza'.
        """
        print('Finding {}...'.format(food))
        if food == 'deep fried pizza':
            raise RuntimeError('Health warning!')
        print('Taking {}'.format(food))

    def close(self):
        """Announce closing the fridge door."""
        # BUG FIX: corrected the 'fridg' typo in the printed message.
        print('Close fridge door.')
def raid(food):
    """Open the fridge and take *food*, guaranteeing the door gets closed."""
    raider = RefrigeratorRaider()
    with closing(raider) as fridge:
        fridge.open()
        fridge.take(food)
raid('bacon')
# The second raid triggers the RuntimeError path; closing() still shuts the
# door before the exception propagates.
raid('deep fried pizza')
|
flexible
|
{
"blob_id": "7455eb670c2c019b8d066fcc6f2878a2136b7fd0",
"index": 5051,
"step-1": "<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-3": "__author__ = 'Prikly Grayp'\n__license__ = 'MIT'\n__version__ = '1.0.0'\n__email__ = 'priklygrayp@gmail.com'\n__status__ = 'Development'\n<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-4": "__author__ = 'Prikly Grayp'\n__license__ = 'MIT'\n__version__ = '1.0.0'\n__email__ = 'priklygrayp@gmail.com'\n__status__ = 'Development'\nfrom contextlib import closing\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-5": "__author__ = \"Prikly Grayp\"\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__email__ = \"priklygrayp@gmail.com\"\n__status__ = \"Development\"\n\nfrom contextlib import closing\n\nclass RefrigeratorRaider:\n '''Raid a refrigerator'''\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\nraid('bacon')\nraid('deep fried pizza')",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
#!/usr/bin/env python
#
# Copyright 2017-2021 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from sqlalchemy.exc import *
from Pegasus.db.admin.admin_loader import *
from Pegasus.db.admin.versions.base_version import BaseVersion
from Pegasus.db.schema import *
# Schema version this migration module brings the database up to.
DB_VERSION = 8
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
class Version(BaseVersion):
    """Admin-database migration that upgrades the schema to version 8."""

    def __init__(self, connection):
        super().__init__(connection)

    def update(self, force=False):
        """Apply the version-8 migration: add a nullable ``reason`` column
        to ``master_workflowstate``.

        :param force: accepted for interface compatibility; not used here.
        :return: None
        """
        log.info("Updating to version %s" % DB_VERSION)
        try:
            log.info("Updating master_workflowstate...")
            self.db.execute("ALTER TABLE master_workflowstate ADD reason TEXT NULL")
        except (OperationalError, ProgrammingError):
            # NOTE(review): presumably swallowed because the column already
            # exists on a re-run of the migration -- confirm.
            pass
        except Exception as e:
            # Any other failure: undo partial work, log, and re-wrap.
            self.db.rollback()
            log.exception(e)
            raise Exception(e)

        self.db.commit()

    def downgrade(self, force=False):
        "Downgrade is not necessary as reason accepts NULL values"
|
normal
|
{
"blob_id": "12fd4e3bfb6821205a9b65b4d236b4158ec4ef1e",
"index": 7345,
"step-1": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-3": "<mask token>\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-4": "import logging\nfrom sqlalchemy.exc import *\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\nDB_VERSION = 8\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info('Updating to version %s' % DB_VERSION)\n try:\n log.info('Updating master_workflowstate...')\n self.db.execute(\n 'ALTER TABLE master_workflowstate ADD reason TEXT NULL')\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n self.db.commit()\n\n def downgrade(self, force=False):\n \"\"\"Downgrade is not necessary as reason accepts NULL values\"\"\"\n",
"step-5": "#!/usr/bin/env python\n#\n# Copyright 2017-2021 University Of Southern California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport logging\n\nfrom sqlalchemy.exc import *\n\nfrom Pegasus.db.admin.admin_loader import *\nfrom Pegasus.db.admin.versions.base_version import BaseVersion\nfrom Pegasus.db.schema import *\n\nDB_VERSION = 8\n\nlog = logging.getLogger(__name__)\n\n\nclass Version(BaseVersion):\n def __init__(self, connection):\n super().__init__(connection)\n\n def update(self, force=False):\n \"\"\"\n\n :param force:\n :return:\n \"\"\"\n log.info(\"Updating to version %s\" % DB_VERSION)\n try:\n log.info(\"Updating master_workflowstate...\")\n self.db.execute(\"ALTER TABLE master_workflowstate ADD reason TEXT NULL\")\n except (OperationalError, ProgrammingError):\n pass\n except Exception as e:\n self.db.rollback()\n log.exception(e)\n raise Exception(e)\n\n self.db.commit()\n\n def downgrade(self, force=False):\n \"Downgrade is not necessary as reason accepts NULL values\"\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from torchvision import datasets, transforms
import torch
def load_data(data_folder, batch_size, train, num_workers=0, **kwargs):
    """Build a DataLoader over an ImageFolder dataset.

    Training data gets resize / random-crop / horizontal-flip augmentation;
    evaluation data is only resized.  Returns ``(loader, n_classes)``.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if train:
        pipeline = transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        pipeline = transforms.Compose([
            transforms.Resize([224, 224]),
            transforms.ToTensor(),
            normalize,
        ])
    dataset = datasets.ImageFolder(root=data_folder, transform=pipeline)
    # Shuffle and drop the ragged final batch only while training.
    loader = get_data_loader(dataset, batch_size=batch_size,
                             shuffle=bool(train),
                             num_workers=num_workers, **kwargs,
                             drop_last=bool(train))
    return loader, len(dataset.classes)
def get_data_loader(dataset, batch_size, shuffle=True, drop_last=False, num_workers=0, infinite_data_loader=False, **kwargs):
    """Wrap *dataset* in a finite or infinite DataLoader.

    :param dataset: torch-style dataset to iterate.
    :param batch_size: number of samples per batch.
    :param shuffle: whether to shuffle the finite loader each epoch.
    :param drop_last: drop the final ragged batch.
    :param num_workers: DataLoader worker processes.
    :param infinite_data_loader: return an InfiniteDataLoader instead of a
        standard (epoch-bounded) DataLoader.
    """
    if not infinite_data_loader:
        # BUG FIX: honor the caller's `shuffle` argument; the original
        # hard-coded shuffle=True, making shuffle=False impossible.
        return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           shuffle=shuffle, drop_last=drop_last,
                                           num_workers=num_workers, **kwargs)
    else:
        return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                                  drop_last=drop_last, num_workers=num_workers, **kwargs)
class _InfiniteSampler(torch.utils.data.Sampler):
    """Wraps another Sampler to yield an infinite stream."""

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Re-iterate the wrapped sampler forever; each pass may produce a
        # fresh ordering (e.g. RandomSampler reshuffles per pass), so the
        # sampler is re-entered rather than its first pass being cached.
        while True:
            yield from self.sampler
class InfiniteDataLoader:
    """A DataLoader-like wrapper that yields batches forever.

    Batches are drawn with a random (optionally weighted) sampler wrapped in
    an infinite batch sampler, so iteration never raises StopIteration.
    """

    def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,
                 num_workers=0, weights=None, **kwargs):
        # NOTE(review): `shuffle` is accepted for signature compatibility but
        # not consulted -- sampling here is always random; confirm callers
        # expect that.
        if weights is None:
            sampler = torch.utils.data.RandomSampler(dataset,
                                                     replacement=False)
        else:
            sampler = torch.utils.data.WeightedRandomSampler(
                weights, replacement=False, num_samples=batch_size)
        batch_sampler = torch.utils.data.BatchSampler(
            sampler, batch_size=batch_size, drop_last=drop_last)
        loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler),
        )
        self._infinite_iterator = iter(loader)

    def __iter__(self):
        while True:
            yield next(self._infinite_iterator)

    def __len__(self):
        return 0  # Always return 0
|
normal
|
{
"blob_id": "d99fd3dc63f6a40dde5a6230111b9f3598d3c5fd",
"index": 7830,
"step-1": "<mask token>\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n\n def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, weights=None, **kwargs):\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=False, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=False\n )\n batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=\n batch_size, drop_last=drop_last)\n self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset,\n num_workers=num_workers, batch_sampler=_InfiniteSampler(\n batch_sampler)))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return 0\n",
"step-2": "<mask token>\n\n\ndef get_data_loader(dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, infinite_data_loader=False, **kwargs):\n if not infinite_data_loader:\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True, drop_last=drop_last, num_workers=num_workers, **\n kwargs)\n else:\n return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=\n True, drop_last=drop_last, num_workers=num_workers, **kwargs)\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n\n def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, weights=None, **kwargs):\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=False, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=False\n )\n batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=\n batch_size, drop_last=drop_last)\n self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset,\n num_workers=num_workers, batch_sampler=_InfiniteSampler(\n batch_sampler)))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return 0\n",
"step-3": "<mask token>\n\n\ndef load_data(data_folder, batch_size, train, num_workers=0, **kwargs):\n transform = {'train': transforms.Compose([transforms.Resize([256, 256]),\n transforms.RandomCrop(224), transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, \n 0.406], std=[0.229, 0.224, 0.225])]), 'test': transforms.Compose([\n transforms.Resize([224, 224]), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])}\n data = datasets.ImageFolder(root=data_folder, transform=transform[\n 'train' if train else 'test'])\n data_loader = get_data_loader(data, batch_size=batch_size, shuffle=True if\n train else False, num_workers=num_workers, **kwargs, drop_last=True if\n train else False)\n n_class = len(data.classes)\n return data_loader, n_class\n\n\ndef get_data_loader(dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, infinite_data_loader=False, **kwargs):\n if not infinite_data_loader:\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True, drop_last=drop_last, num_workers=num_workers, **\n kwargs)\n else:\n return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=\n True, drop_last=drop_last, num_workers=num_workers, **kwargs)\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n\n def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, weights=None, **kwargs):\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=False, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=False\n )\n batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=\n batch_size, 
drop_last=drop_last)\n self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset,\n num_workers=num_workers, batch_sampler=_InfiniteSampler(\n batch_sampler)))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return 0\n",
"step-4": "from torchvision import datasets, transforms\nimport torch\n\n\ndef load_data(data_folder, batch_size, train, num_workers=0, **kwargs):\n transform = {'train': transforms.Compose([transforms.Resize([256, 256]),\n transforms.RandomCrop(224), transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, \n 0.406], std=[0.229, 0.224, 0.225])]), 'test': transforms.Compose([\n transforms.Resize([224, 224]), transforms.ToTensor(), transforms.\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])}\n data = datasets.ImageFolder(root=data_folder, transform=transform[\n 'train' if train else 'test'])\n data_loader = get_data_loader(data, batch_size=batch_size, shuffle=True if\n train else False, num_workers=num_workers, **kwargs, drop_last=True if\n train else False)\n n_class = len(data.classes)\n return data_loader, n_class\n\n\ndef get_data_loader(dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, infinite_data_loader=False, **kwargs):\n if not infinite_data_loader:\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n shuffle=True, drop_last=drop_last, num_workers=num_workers, **\n kwargs)\n else:\n return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=\n True, drop_last=drop_last, num_workers=num_workers, **kwargs)\n\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\n\nclass InfiniteDataLoader:\n\n def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,\n num_workers=0, weights=None, **kwargs):\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=False, num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset, replacement=False\n )\n batch_sampler = 
torch.utils.data.BatchSampler(sampler, batch_size=\n batch_size, drop_last=drop_last)\n self._infinite_iterator = iter(torch.utils.data.DataLoader(dataset,\n num_workers=num_workers, batch_sampler=_InfiniteSampler(\n batch_sampler)))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return 0\n",
"step-5": "from torchvision import datasets, transforms\nimport torch\n\ndef load_data(data_folder, batch_size, train, num_workers=0, **kwargs):\n transform = {\n 'train': transforms.Compose(\n [transforms.Resize([256, 256]),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])]),\n 'test': transforms.Compose(\n [transforms.Resize([224, 224]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])])\n }\n data = datasets.ImageFolder(root=data_folder, transform=transform['train' if train else 'test'])\n data_loader = get_data_loader(data, batch_size=batch_size, \n shuffle=True if train else False, \n num_workers=num_workers, **kwargs, drop_last=True if train else False)\n n_class = len(data.classes)\n return data_loader, n_class\n\n\ndef get_data_loader(dataset, batch_size, shuffle=True, drop_last=False, num_workers=0, infinite_data_loader=False, **kwargs):\n if not infinite_data_loader:\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last, num_workers=num_workers, **kwargs)\n else:\n return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=drop_last, num_workers=num_workers, **kwargs)\n\nclass _InfiniteSampler(torch.utils.data.Sampler):\n \"\"\"Wraps another Sampler to yield an infinite stream.\"\"\"\n def __init__(self, sampler):\n self.sampler = sampler\n\n def __iter__(self):\n while True:\n for batch in self.sampler:\n yield batch\n\nclass InfiniteDataLoader:\n def __init__(self, dataset, batch_size, shuffle=True, drop_last=False, num_workers=0, weights=None, **kwargs):\n if weights is not None:\n sampler = torch.utils.data.WeightedRandomSampler(weights,\n replacement=False,\n num_samples=batch_size)\n else:\n sampler = torch.utils.data.RandomSampler(dataset,\n replacement=False)\n \n batch_sampler = 
torch.utils.data.BatchSampler(\n sampler,\n batch_size=batch_size,\n drop_last=drop_last)\n\n self._infinite_iterator = iter(torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=_InfiniteSampler(batch_sampler)\n ))\n\n def __iter__(self):\n while True:\n yield next(self._infinite_iterator)\n\n def __len__(self):\n return 0 # Always return 0",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
import SimpleITK as sitk
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# # Estimation function # #
# --------------------------- #
# Linear registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [SimpleITK.Image]
# im_mov : The group (moving) image [SimpleITK.Image]
# mov_mask : Mask restricting the metric on the moving image, None by default [SimpleITK.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# lin_xfm : Configured registration method, not yet executed [SimpleITK.ImageRegistrationMethod]
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
    """Configure a linear (ScaleSkewVersor3D) registration of im_mov onto im_ref.

    Returns a ready-to-execute SimpleITK ImageRegistrationMethod; no
    registration is run here.
    """
    # Moment-based initialization aligns the images' centers of mass.
    init = sitk.CenteredTransformInitializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
                                             sitk.CenteredTransformInitializerFilter.MOMENTS)
    reg = sitk.ImageRegistrationMethod()
    # Mean-squares metric evaluated on a 1% random voxel sample.
    reg.SetMetricAsMeanSquares()
    reg.SetMetricSamplingStrategy(reg.RANDOM)
    reg.SetMetricSamplingPercentage(0.01)
    # Restrict the metric to the moving-image mask when one is supplied.
    if mov_mask:
        reg.SetMetricMovingMask(mov_mask)
    # Gradient-descent optimizer with physical-shift parameter scaling.
    reg.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
                                      convergenceMinimumValue=1e-6, convergenceWindowSize=10)
    reg.SetOptimizerScalesFromPhysicalShift()
    reg.SetInitialTransform(init)
    if show_parameters:
        print(reg)
    return reg
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [SimpleITK.Image]
# fixed_mask : The mask of the common image, default is None [SimpleITK.Image]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# nl_xfm : Configured 'Demons' registration method, not yet executed [SimpleITK.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
    """Configure a non-linear 'Demons' registration anchored to im_ref.

    Returns a ready-to-execute SimpleITK ImageRegistrationMethod; no
    registration is run here.
    """
    reg = sitk.ImageRegistrationMethod()
    # Build an identity displacement field on the reference image's grid.
    field_filter = sitk.TransformToDisplacementFieldFilter()
    field_filter.SetReferenceImage(im_ref)
    init_transform = sitk.DisplacementFieldTransform(
        field_filter.Execute(sitk.Transform()))
    # Update field = fluid regularization; total field = elastic regularization.
    init_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
    reg.SetInitialTransform(init_transform)
    # Demons metric; small intensity differences are treated as noise.
    reg.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
    # Evaluate the metric only inside the fixed-image mask when given.
    if fixed_mask is not None:
        reg.SetMetricFixedMask(fixed_mask)
    reg.SetInterpolator(sitk.sitkLinear)
    # Gradient descent with physical-shift parameter scaling.
    reg.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10,
                                      convergenceMinimumValue=1e-6, convergenceWindowSize=10)
    reg.SetOptimizerScalesFromPhysicalShift()
    if show_parameters:
        print(reg)
    return reg
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [SimpleITK.Image]
# im_mov : The group (moving) image [SimpleITK.Image]
# trafo : Configured registration method to execute [SimpleITK.ImageRegistrationMethod]
# show_parameters : If you want to see the parameters, false by default [boolean]
# --- Output --- #
# transf : The estimated transformation parameters [SimpleITK.Transform]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
    """Execute the configured registration and return the estimated transform."""
    # Registration requires floating-point pixel types.
    fixed = sitk.Cast(im_ref, sitk.sitkFloat32)
    moving = sitk.Cast(im_mov, sitk.sitkFloat32)
    result = trafo.Execute(fixed, moving)
    if show_parameters:
        print(result)
        print("--------")
        print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
        print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
        print("--------")
    return result
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
    """Atlas-based segmentation of common_img via registration + majority voting.

    Each atlas CT in ct_list is linearly registered to common_img and its
    mask from seg_list is resampled onto the common grid. A voxel is kept
    when at least two resampled masks agree on it (majority for 3 atlases).

    Args:
        common_img: The target image to segment [SimpleITK.Image].
        ct_list: Atlas CT images [list of SimpleITK.Image].
        seg_list: Corresponding atlas masks [list of SimpleITK.Image].

    Returns:
        Binary segmentation as a numpy.ndarray (uint8, same shape as common_img).
    """
    seg = []
    pairwise_intersections = []
    # # REGISTRATION # #
    for i in range(len(ct_list)):
        # Configure and run the linear registration for this atlas.
        trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
        final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
        # Resample the atlas mask onto the common image grid.
        resampler = sitk.ResampleImageFilter()
        resampler.SetReferenceImage(common_img)
        resampler.SetInterpolator(sitk.sitkLinear)
        resampler.SetTransform(final_trafo)
        resampled_mask = resampler.Execute(seg_list[i])
        seg.append(sitk.GetArrayFromImage(resampled_mask))
    # # MAJORITY VOTING # #
    # A voxel appearing in the intersection of ANY pair of masks has at
    # least two votes; the union of all pairwise intersections therefore
    # implements "at least 2 of k agree".
    for i in range(len(seg)):
        for j in range(i + 1, len(seg)):
            voxels_i = {tuple(e) for e in np.transpose(np.nonzero(seg[i])).tolist()}
            voxels_j = {tuple(e) for e in np.transpose(np.nonzero(seg[j])).tolist()}
            pairwise_intersections.append(voxels_i & voxels_j)
    # Generalized from the original hard-coded 3-atlas union so any number
    # of atlases (>= 2) works; behavior is unchanged for exactly 3.
    if pairwise_intersections:
        majority_voxels = sorted(set().union(*pairwise_intersections))
    else:
        majority_voxels = []
    # Build the output array and mark the winning voxels.
    image_array = sitk.GetArrayFromImage(common_img)
    segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
    for x, y, z in majority_voxels:
        segmented_array[x, y, z] = 1
    return segmented_array
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The ground-truth mask image [sitk-image]
# seg_img : The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
    """Print Hausdorff distance plus Jaccard and Dice overlap between two label images."""
    # One filter for the surface distance, one for the overlap measures.
    hausdorff_filter = sitk.HausdorffDistanceImageFilter()
    overlap_filter = sitk.LabelOverlapMeasuresImageFilter()
    hausdorff_filter.Execute(mask_img, seg_img)
    overlap_filter.Execute(mask_img, seg_img)
    # Report all three similarity measures to the user.
    print('The Hausdorff distance: {}'.format(
        hausdorff_filter.GetHausdorffDistance()))
    print('The Dice coefficient: {}'.format(overlap_filter.GetDiceCoefficient()))
    print('The Jaccard coefficient: {}'.format(overlap_filter.GetJaccardCoefficient()))
    return None
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier by reading 2d images and comparing
# them to a vector which has labels that correspond to if it contains
# the pubic symphysis. The labels are binary.
# --------------------------- #
# --- Input --- #
# slice_list : List of 2D slice images [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
    """Fit a random forest on flattened axial slices with binary per-slice labels."""
    feature_rows = []
    # Turn every axial slice of every volume into one flattened feature row.
    for image in slice_list:
        volume = sitk.GetArrayFromImage(image)
        # In-place resize pads/truncates each volume to a fixed 512^3 grid.
        volume.resize((512, 512, 512))
        feature_rows.extend(volume[:, :, z].flatten() for z in range(volume.shape[2]))
    x_train = np.asarray(feature_rows, dtype=np.uint8)
    # Stack the per-volume label vectors into one training target.
    y_train = None
    for idx in range(len(vector_list)):
        if idx == 0:
            y_train = vector_list[idx]
        else:
            y_train = np.concatenate([y_train, vector_list[idx]])
    # Train classifier
    trained_forest = RandomForestClassifier(n_estimators=150)
    trained_forest.fit(x_train, y_train)
    return trained_forest
# # Classifier Function # #
# --------------------------- #
# Utilizes a trained random forest classifier by reading CT image and prints
# which slice has the highest probability of containing the pubic symphysis.
# --------------------------- #
# --- Input --- #
# ct_image : List of 2D axial slice images [list]
# classifier : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
# --- Output --- #
# None
def slice_probability(ct_image, classifier):
    """Print the axial slice index(es) with the highest predicted probability
    of containing the pubic symphysis.

    Args:
        ct_image: CT volume to scan [SimpleITK.Image].
        classifier: Trained classifier exposing predict_proba
            [sklearn.ensemble.RandomForestClassifier].

    Returns:
        None (results are printed).
    """
    test_list = []
    # Convert image to numpy array & resize in place to the fixed 512^3
    # grid the classifier was trained on.
    im_array = sitk.GetArrayFromImage(ct_image)
    im_array.resize((512, 512, 512))
    for z in range(im_array.shape[2]):
        test_list.append(im_array[:, :, z].flatten())
    test_array = np.asarray(test_list, dtype=np.uint8)
    # Per-slice class probabilities; column 1 is the positive class.
    probabilities = classifier.predict_proba(test_array)
    positive = np.asarray(probabilities)[:, 1]
    # Fix: the original shadowed the builtin `max`; use a vectorized lookup
    # of every slice that attains the maximum positive probability.
    max_prob = np.amax(positive)
    max_list = [int(i) for i in np.flatnonzero(positive == max_prob)]
    # Print result to user
    if len(max_list) == 1:
        print("Slice {} has highest probability which is: {}".format(max_list[0], max_prob))
    else:
        print("Slices {} have the highest probability which is: {}".format(max_list, max_prob))
    return None
|
normal
|
{
"blob_id": "2b7d9ded82fa980eeae06beb2d84d89612d53df1",
"index": 821,
"step-1": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n 
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\n<mask token>\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n 
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n 
hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n 
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n 
hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\ndef slice_probability(ct_image, classifier):\n test_list = []\n max_list = []\n im_array = sitk.GetArrayFromImage(ct_image)\n im_array.resize((512, 512, 512))\n for z in range(im_array.shape[2]):\n test_list.append(im_array[:, :, z].flatten())\n test_array = np.asarray(test_list, dtype=np.uint8)\n probabilities = classifier.predict_proba(test_array)\n max = np.amax(probabilities, axis=0)[1]\n for i, prob in enumerate(probabilities):\n if prob[1] == max:\n max_list.append(i)\n if len(max_list) == 1:\n print('Slice {} has highest probability which is: {}'.format(\n max_list[0], max))\n else:\n print('Slices {} have the highest probability which is: {}'.format(\n max_list, max))\n return None\n",
"step-4": "import SimpleITK as sitk\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n 
return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n 
jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\ndef slice_probability(ct_image, classifier):\n test_list = []\n max_list = []\n im_array = sitk.GetArrayFromImage(ct_image)\n im_array.resize((512, 512, 512))\n for z in range(im_array.shape[2]):\n test_list.append(im_array[:, :, z].flatten())\n test_array = np.asarray(test_list, dtype=np.uint8)\n probabilities = classifier.predict_proba(test_array)\n max = np.amax(probabilities, axis=0)[1]\n for i, prob in enumerate(probabilities):\n if prob[1] == max:\n max_list.append(i)\n if len(max_list) == 1:\n print('Slice {} has highest probability which is: {}'.format(\n max_list[0], max))\n else:\n print('Slices {} have the highest probability which is: {}'.format(\n max_list, max))\n return None\n",
"step-5": "import SimpleITK as sitk\r\nimport numpy as np\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\n# # Estimation function # #\r\n# --------------------------- #\r\n# Linear registration function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# im_mov : The group image [numpy.ndarray]\r\n# mov_mask : List of GROUP masks [list]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# lin_xfm : Estimated transformation parameters [itk.simple.Transform]\r\n\r\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\r\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),\r\n sitk.CenteredTransformInitializerFilter.MOMENTS)\r\n\r\n # Initialize registration\r\n lin_transformation = sitk.ImageRegistrationMethod()\r\n\r\n # Set metrics\r\n lin_transformation.SetMetricAsMeanSquares()\r\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\r\n lin_transformation.SetMetricSamplingPercentage(0.01)\r\n\r\n # Set mask\r\n if mov_mask:\r\n lin_transformation.SetMetricMovingMask(mov_mask)\r\n\r\n # Gradient Descent optimizer\r\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,\r\n convergenceMinimumValue=1e-6, convergenceWindowSize=10)\r\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\r\n\r\n # Set the initial transformation\r\n lin_transformation.SetInitialTransform(initial_transform)\r\n\r\n # Switching to preferred variable\r\n lin_xfm = lin_transformation\r\n\r\n if show_parameters:\r\n print(lin_xfm)\r\n\r\n return lin_xfm\r\n\r\n\r\n# # Estimation function # #\r\n# --------------------------- #\r\n# Non-linear 'Demons' registration function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# fixed_mask : The mask of common 
image, default is None [numpy.ndarray]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# nl_xfm : Estimated transformation parameters [itk.simple.Transform]\r\n\r\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\r\n # Initialize the registration\r\n reg_method = sitk.ImageRegistrationMethod()\r\n\r\n # Create initial identity transformation.\r\n transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()\r\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\r\n initial_transform = sitk.DisplacementFieldTransform(\r\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\r\n\r\n # Regularization. The update field refers to fluid regularization; the total field to elastic regularization.\r\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)\r\n\r\n # Set the initial transformation\r\n reg_method.SetInitialTransform(initial_transform)\r\n\r\n # Set Demons registration\r\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\r\n\r\n # Evaluate the metrics only in the mask\r\n if fixed_mask is not None:\r\n reg_method.SetMetricFixedMask(fixed_mask)\r\n\r\n # Set a linear interpolator\r\n reg_method.SetInterpolator(sitk.sitkLinear)\r\n\r\n # Set a gradient descent optimizer\r\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,\r\n convergenceWindowSize=10)\r\n reg_method.SetOptimizerScalesFromPhysicalShift()\r\n\r\n # Switching to the preferred variable\r\n nl_xfm = reg_method\r\n\r\n if show_parameters:\r\n print(nl_xfm)\r\n\r\n return nl_xfm\r\n\r\n# # Application function # #\r\n# --------------------------- #\r\n# Executes either the linear or the non-linear function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# im_mov : The group image 
[numpy.ndarray]\r\n# trafo : The chosen transformation [numpy.ndarray]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# final_image : Returns the registered image [numpy.ndarray]\r\n\r\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\r\n # Perform registration (Executes it)\r\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))\r\n\r\n if show_parameters:\r\n print(transf)\r\n print(\"--------\")\r\n print(\"Optimizer stop condition: {0}\".format(trafo.GetOptimizerStopConditionDescription()))\r\n print(\"Number of iterations: {0}\".format(trafo.GetOptimizerIteration()))\r\n print(\"--------\")\r\n\r\n return transf\r\n\r\n\r\n# # Atlas segmentation function # #\r\n# --------------------------- #\r\n# Atlas-based segmentation using the CT images in 'ct_list'\r\n# and corresponding segmentation masks from 'seg_list'.\r\n# After that, majority voting to return a segmentation mask.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# common_img : The chosen COMMON image [sitk-image]\r\n# ct_list : List of GROUP images [list]\r\n# seg_list : List of GROUP masks [list]\r\n\r\n# --- Output --- #\r\n# segmented_array : The segmentation as an array [numpy.ndarray]\r\n\r\ndef seg_atlas(common_img, ct_list, seg_list):\r\n # Creating the necessary lists\r\n seg = []\r\n image_list = []\r\n\r\n # # REGISTRATION # #\r\n for i in range(len(ct_list)):\r\n # Adjusting the settings and applying\r\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)\r\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\r\n\r\n # Perform registration on mask image\r\n resampler = sitk.ResampleImageFilter()\r\n resampler.SetReferenceImage(common_img)\r\n resampler.SetInterpolator(sitk.sitkLinear)\r\n\r\n resampler.SetTransform(final_trafo)\r\n resampled_mask = 
resampler.Execute(seg_list[i])\r\n\r\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\r\n seg.append(resampled_mask_data)\r\n\r\n # # MAJORITY VOTING # #\r\n for i in range(len(seg)):\r\n for j in range(i + 1, len(seg)):\r\n arr1 = np.transpose(np.nonzero(seg[i]))\r\n arr2 = np.transpose(np.nonzero(seg[j]))\r\n\r\n # Filling two lists\r\n arr1list = [tuple(e) for e in arr1.tolist()]\r\n arr2list = [tuple(e) for e in arr2.tolist()]\r\n\r\n # Sorting both lists\r\n arr1list.sort()\r\n arr2list.sort()\r\n\r\n # Creating necessary list & sorting\r\n intersections = list(set(arr1list).intersection(arr2list))\r\n intersections.sort()\r\n\r\n image_list.append(intersections)\r\n # Creating a list which contains the indexes of intersecting voxels\r\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))\r\n\r\n # Sorting the list\r\n intersection_list.sort()\r\n\r\n # Fetches array from image\r\n image_array = sitk.GetArrayFromImage(common_img)\r\n\r\n # Creates an array for the points and fills it using indexes\r\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\r\n for x, y, z in intersection_list:\r\n segmented_array[x, y, z] = 1\r\n\r\n return segmented_array\r\n\r\n\r\n# # Similarity function # #\r\n# --------------------------- #\r\n# Calculates the following distances between images:\r\n# 1. Jaccard coef.\r\n# 2. Dice coef.\r\n# 3. 
Hausdorff distance\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# mask_img : The mask image [sikt-image]\r\n# seg_img: The segmented image [sikt-image]\r\n\r\n# --- Output --- #\r\n# None\r\n\r\ndef distances(mask_img, seg_img):\r\n # Creating the necessary filters\r\n hausdorff = sitk.HausdorffDistanceImageFilter()\r\n overlap = sitk.LabelOverlapMeasuresImageFilter()\r\n\r\n # Execute filters\r\n hausdorff.Execute(mask_img, seg_img)\r\n overlap.Execute(mask_img, seg_img)\r\n\r\n # Fetching the distances and appending to distance list\r\n # Jaccard coef.\r\n jaccard = overlap.GetJaccardCoefficient()\r\n\r\n # Dice coef.\r\n dice = overlap.GetDiceCoefficient()\r\n\r\n # Hausdorff distance\r\n hausdorff_distance = hausdorff.GetHausdorffDistance()\r\n\r\n # Printing out the distances for user\r\n print('The Hausdorff distance: {}'.format(\r\n hausdorff_distance))\r\n print('The Dice coefficient: {}'.format(dice))\r\n print('The Jaccard coefficient: {}'.format(jaccard))\r\n\r\n return None\r\n\r\n\r\n# # Classifier Function # #\r\n# --------------------------- #\r\n# Trains a random forest classifier by reading 2d images and comparing\r\n# them to a vector which has labels that correspond to if it contains\r\n# the pubic symphysis. 
The labels are binary.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# slice_list : List of 2D slice images [list]\r\n# vector_list : List of vectors with binary labels [list]\r\n\r\n# --- Output --- #\r\n# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]\r\n\r\ndef train_classifier(slice_list, vector_list):\r\n # Creating necessary list\r\n x_train_list = []\r\n\r\n # Reading in input data\r\n for image in slice_list:\r\n\r\n # Fetching arrays\r\n image_array = sitk.GetArrayFromImage(image)\r\n\r\n # Resizing\r\n image_array.resize((512, 512, 512))\r\n\r\n for z in range(image_array.shape[2]):\r\n x_train_list.append(image_array[:, :, z].flatten())\r\n x_train = np.asarray(x_train_list, dtype=np.uint8)\r\n\r\n # Reading in training labels\r\n y_train = None\r\n for i in range(0, len(vector_list)):\r\n if i == 0:\r\n y_train = vector_list[i]\r\n else:\r\n y_train = np.concatenate([y_train, vector_list[i]])\r\n\r\n # Train classifier\r\n trained_forest = RandomForestClassifier(n_estimators=150)\r\n trained_forest.fit(x_train, y_train)\r\n\r\n return trained_forest\r\n\r\n\r\n# # Classifier Function # #\r\n# --------------------------- #\r\n# Utilizes a trained random forest classifier by reading CT image and prints\r\n# which slice has the highest probability of containing the pubic symphysis.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# ct_image : List of 2D axial slice images [list]\r\n# classifier : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]\r\n\r\n# --- Output --- #\r\n# None\r\n\r\ndef slice_probability(ct_image, classifier):\r\n # Creating necessary lists\r\n test_list = []\r\n max_list = []\r\n\r\n # Convert image to numpy array & resize\r\n im_array = sitk.GetArrayFromImage(ct_image)\r\n im_array.resize((512, 512, 512))\r\n\r\n for z in range(im_array.shape[2]):\r\n test_list.append(im_array[:, :, z].flatten())\r\n test_array = 
np.asarray(test_list, dtype=np.uint8)\r\n\r\n # Predict probabilities for each slice\r\n probabilities = classifier.predict_proba(test_array)\r\n\r\n # Fetching array with maximum probabilities\r\n max = np.amax(probabilities, axis=0)[1]\r\n\r\n for i, prob in enumerate(probabilities):\r\n if prob[1] == max:\r\n max_list.append(i)\r\n\r\n # Print result to user\r\n if len(max_list) == 1:\r\n print(\"Slice {} has highest probability which is: {}\".format(max_list[0], max))\r\n else:\r\n print(\"Slices {} have the highest probability which is: {}\".format(max_list, max))\r\n\r\n return None\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('bcs', '0002_auto_20200915_2245')]
operations = [migrations.AddField(model_name='study_material', name=
'study_materail_date', field=models.DateField(auto_now_add=True,
default=django.utils.timezone.now), preserve_default=False),
migrations.AlterField(model_name='exam', name='exam_date', field=
models.DateField(blank=True, default=datetime.date(2020, 9, 26))),
migrations.AlterField(model_name='exam', name='exam_time', field=
models.IntegerField(default=10)), migrations.AlterField(model_name=
'study_material', name='study_image', field=models.ImageField(blank
=True, null=True, upload_to='images/'))]
<|reserved_special_token_1|>
import datetime
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [('bcs', '0002_auto_20200915_2245')]
operations = [migrations.AddField(model_name='study_material', name=
'study_materail_date', field=models.DateField(auto_now_add=True,
default=django.utils.timezone.now), preserve_default=False),
migrations.AlterField(model_name='exam', name='exam_date', field=
models.DateField(blank=True, default=datetime.date(2020, 9, 26))),
migrations.AlterField(model_name='exam', name='exam_time', field=
models.IntegerField(default=10)), migrations.AlterField(model_name=
'study_material', name='study_image', field=models.ImageField(blank
=True, null=True, upload_to='images/'))]
<|reserved_special_token_1|>
# Generated by Django 3.1 on 2020-09-26 03:46
import datetime
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('bcs', '0002_auto_20200915_2245'),
]
operations = [
migrations.AddField(
model_name='study_material',
name='study_materail_date',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AlterField(
model_name='exam',
name='exam_date',
field=models.DateField(blank=True, default=datetime.date(2020, 9, 26)),
),
migrations.AlterField(
model_name='exam',
name='exam_time',
field=models.IntegerField(default=10),
),
migrations.AlterField(
model_name='study_material',
name='study_image',
field=models.ImageField(blank=True, null=True, upload_to='images/'),
),
]
|
flexible
|
{
"blob_id": "61484d9a08f2e3fcd15573ce89be4118a442dc2e",
"index": 6062,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('bcs', '0002_auto_20200915_2245')]\n operations = [migrations.AddField(model_name='study_material', name=\n 'study_materail_date', field=models.DateField(auto_now_add=True,\n default=django.utils.timezone.now), preserve_default=False),\n migrations.AlterField(model_name='exam', name='exam_date', field=\n models.DateField(blank=True, default=datetime.date(2020, 9, 26))),\n migrations.AlterField(model_name='exam', name='exam_time', field=\n models.IntegerField(default=10)), migrations.AlterField(model_name=\n 'study_material', name='study_image', field=models.ImageField(blank\n =True, null=True, upload_to='images/'))]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('bcs', '0002_auto_20200915_2245')]\n operations = [migrations.AddField(model_name='study_material', name=\n 'study_materail_date', field=models.DateField(auto_now_add=True,\n default=django.utils.timezone.now), preserve_default=False),\n migrations.AlterField(model_name='exam', name='exam_date', field=\n models.DateField(blank=True, default=datetime.date(2020, 9, 26))),\n migrations.AlterField(model_name='exam', name='exam_time', field=\n models.IntegerField(default=10)), migrations.AlterField(model_name=\n 'study_material', name='study_image', field=models.ImageField(blank\n =True, null=True, upload_to='images/'))]\n",
"step-5": "# Generated by Django 3.1 on 2020-09-26 03:46\n\nimport datetime\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('bcs', '0002_auto_20200915_2245'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='study_material',\n name='study_materail_date',\n field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='exam',\n name='exam_date',\n field=models.DateField(blank=True, default=datetime.date(2020, 9, 26)),\n ),\n migrations.AlterField(\n model_name='exam',\n name='exam_time',\n field=models.IntegerField(default=10),\n ),\n migrations.AlterField(\n model_name='study_material',\n name='study_image',\n field=models.ImageField(blank=True, null=True, upload_to='images/'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
import show_imgs as si
IMG_PATH = "../sample_imgs"
def blur():
image = cv2.imread(IMG_PATH + "/jjang.jpg")
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f"ksize: {ksize}"
kernel = np.ones(ksize)
kernel /= kernel.size
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, "cv2.filter2D", 3)
resimg = si.show_imgs(blur_imgs, "cv2.blur", 3)
def gaussian():
image = cv2.imread(IMG_PATH + "/jjang.jpg")
kernel_size = (5, 5)
blur_imgs = {}
blur_imgs["original"] = image
blur_imgs["blur"] = cv2.blur(image, kernel_size)
blur_imgs["GaussianBlur"] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, "GaussianBlur", 3, 1000)
def bilateral():
    """Contrast Gaussian smoothing with edge-preserving bilateral filtering
    at two colour/space sigma settings (diameter fixed at 5).
    """
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    ksize = (5, 5)
    panels = {}
    panels["original"] = image
    panels["gaussian"] = cv2.GaussianBlur(image, ksize, 0)
    # Larger sigmas smooth more aggressively while still respecting edges.
    for sigma in (50, 150):
        panels[f"bilateral (5,{sigma},{sigma})"] = cv2.bilateralFilter(image, 5, sigma, sigma)
    result_img = si.show_imgs(panels, "Bilateral Filter", 2)
if __name__ == "__main__":
    # Demo entry point; swap the call below to try the other comparisons.
    # gaussian()
    bilateral()
|
normal
|
{
"blob_id": "8e5d05d925d47a85ad7c211f26af7951be048d32",
"index": 9351,
"step-1": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-3": "<mask token>\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-4": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-5": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = \"../sample_imgs\"\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f\"ksize: {ksize}\"\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, \"cv2.filter2D\", 3)\n resimg = si.show_imgs(blur_imgs, \"cv2.blur\", 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"blur\"] = cv2.blur(image, kernel_size)\n blur_imgs[\"GaussianBlur\"] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, \"GaussianBlur\", 3, 1000)\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"gaussian\"] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs[\"bilateral (5,50,50)\"] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs[\"bilateral (5,150,150)\"] = cv2.bilateralFilter(image, 5, 150, 150)\n result_img = si.show_imgs(blur_imgs, \"Bilateral Filter\", 2)\n\n\n\nif __name__ == \"__main__\":\n # gaussian()\n bilateral()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
help_txt = """
:help, show this help menu. :help [command] for detail
:dict [word], only find translation on dict.cn
:google [sentence], only find translation on google api
:lan2lan [sentence], translate from one language to another language
:add [word], add new word to your library
:del [word], delete word from your library
:list [number], list words in your library
:rating [number], lsit words in your library with a certain rate
:history [number], show your search history
:clear, clear your oldest 100 history
for more information, browser http://mardict.appspot.com
"""
help_dict = """
help on dict:
[usage] :dict word
[intro] translate your word only use dict.cn api
[eg] :dict hello
more on http://mardict.appspot.com/help/#dict
"""
help_google = """
help on google:
[usage] :google word
[intro] translate your word only use google api
[eg] :google google is a bitch
more on http://mardict.appspot.com/help/#google
"""
help_lan2lan = """
help on lan2lan:
[usage] :lan2lan word
[intro] translate from one language to another language by google translation api
[eg] :en2zh hello
more on http://mardict.appspot.com/help/#lan2lan
"""
help_history = """
help on history:
[usage] :history (number)
[intro] list your search history
[eg] :history 9
more on http://mardict.appspot.com/help/#history
"""
help_clear = """
help on clear:
[usage] :clear
[intro] clear your search history
more on http://mardict.appspot.com/help/#clear
"""
help_add = """
help on add:
[usage] :add (word)
[intro] add the new word to your library(storing your unfamiliar word)
[eg] :add hello
more on http://mardict.appspot.com/help/#add
"""
help_del = """
help on del:
[usage] :del word
[intro] delete the word from your library
[eg] :del hello
more on http://mardict.appspot.com/help/#del
"""
help_list = """
help on list:
[usage] :list (number)
[intro] list a certain number of words from your library.
[eg] :list 9
this function is very complex, browser the website.
more on http://mardict.appspot.com/help/#list
"""
help_rating = """
help on rating:
[usage] :rating (number)
[intro] list a certain number of words from your library with a certain rate.
[eg] :rating 0 9
this function is very complex, browser the website.
more on http://mardict.appspot.com/help/#rating
"""
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Help texts shown to users of the mardict command interface.
# Fixes over the previous revision: "lsit" -> "list", "browser" -> "browse"
# in the user-facing strings.
help_txt = """
:help, show this help menu. :help [command] for detail
:dict [word], only find translation on dict.cn
:google [sentence], only find translation on google api
:lan2lan [sentence], translate from one language to another language
:add [word], add new word to your library
:del [word], delete word from your library
:list [number], list words in your library
:rating [number], list words in your library with a certain rate
:history [number], show your search history
:clear, clear your oldest 100 history
for more information, browse http://mardict.appspot.com
"""
help_dict = """
help on dict:
[usage] :dict word
[intro] translate your word only use dict.cn api
[eg] :dict hello
more on http://mardict.appspot.com/help/#dict
"""
help_google = """
help on google:
[usage] :google word
[intro] translate your word only use google api
[eg] :google google is a bitch
more on http://mardict.appspot.com/help/#google
"""
help_lan2lan = """
help on lan2lan:
[usage] :lan2lan word
[intro] translate from one language to another language by google translation api
[eg] :en2zh hello
more on http://mardict.appspot.com/help/#lan2lan
"""
help_history = """
help on history:
[usage] :history (number)
[intro] list your search history
[eg] :history 9
more on http://mardict.appspot.com/help/#history
"""
help_clear = """
help on clear:
[usage] :clear
[intro] clear your search history
more on http://mardict.appspot.com/help/#clear
"""
help_add = """
help on add:
[usage] :add (word)
[intro] add the new word to your library(storing your unfamiliar word)
[eg] :add hello
more on http://mardict.appspot.com/help/#add
"""
help_del = """
help on del:
[usage] :del word
[intro] delete the word from your library
[eg] :del hello
more on http://mardict.appspot.com/help/#del
"""
help_list = """
help on list:
[usage] :list (number)
[intro] list a certain number of words from your library.
[eg] :list 9
this function is very complex, browse the website.
more on http://mardict.appspot.com/help/#list
"""
help_rating = """
help on rating:
[usage] :rating (number)
[intro] list a certain number of words from your library with a certain rate.
[eg] :rating 0 9
this function is very complex, browse the website.
more on http://mardict.appspot.com/help/#rating
"""
|
flexible
|
{
"blob_id": "3fadb91bd2367819a540f687530f4b48ed878423",
"index": 9149,
"step-1": "<mask token>\n",
"step-2": "help_txt = \"\"\"\n:help, show this help menu. :help [command] for detail\n:dict [word], only find translation on dict.cn\n:google [sentence], only find translation on google api\n:lan2lan [sentence], translate from one language to another language\n:add [word], add new word to your library\n:del [word], delete word from your library\n:list [number], list words in your library\n:rating [number], lsit words in your library with a certain rate\n:history [number], show your search history\n:clear, clear your oldest 100 history\n\nfor more information, browser http://mardict.appspot.com\n\"\"\"\nhelp_dict = \"\"\"\nhelp on dict:\n[usage] :dict word\n[intro] translate your word only use dict.cn api\n[eg] :dict hello\n\nmore on http://mardict.appspot.com/help/#dict\n\"\"\"\nhelp_google = \"\"\"\nhelp on google:\n[usage] :google word\n[intro] translate your word only use google api\n[eg] :google google is a bitch\n\nmore on http://mardict.appspot.com/help/#google\n\"\"\"\nhelp_lan2lan = \"\"\"\nhelp on lan2lan:\n[usage] :lan2lan word\n[intro] translate from one language to another language by google translation api\n[eg] :en2zh hello\n\nmore on http://mardict.appspot.com/help/#lan2lan\n\"\"\"\nhelp_history = \"\"\"\nhelp on history:\n[usage] :history (number)\n[intro] list your search history\n[eg] :history 9\n\nmore on http://mardict.appspot.com/help/#history\n\"\"\"\nhelp_clear = \"\"\"\nhelp on clear:\n[usage] :clear\n[intro] clear your search history\n\nmore on http://mardict.appspot.com/help/#clear\n\"\"\"\nhelp_add = \"\"\"\nhelp on add:\n[usage] :add (word)\n[intro] add the new word to your library(storing your unfamiliar word)\n[eg] :add hello\n\nmore on http://mardict.appspot.com/help/#add\n\"\"\"\nhelp_del = \"\"\"\nhelp on del:\n[usage] :del word\n[intro] delete the word from your library\n[eg] :del hello\n\nmore on http://mardict.appspot.com/help/#del\n\"\"\"\nhelp_list = \"\"\"\nhelp on list:\n[usage] :list (number)\n[intro] list a certain number 
of words from your library.\n[eg] :list 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#list\n\"\"\"\nhelp_rating = \"\"\"\nhelp on rating:\n[usage] :rating (number)\n[intro] list a certain number of words from your library with a certain rate.\n[eg] :rating 0 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#rating\n\"\"\"\n",
"step-3": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nhelp_txt = \"\"\"\n:help, show this help menu. :help [command] for detail\n:dict [word], only find translation on dict.cn\n:google [sentence], only find translation on google api\n:lan2lan [sentence], translate from one language to another language\n:add [word], add new word to your library\n:del [word], delete word from your library\n:list [number], list words in your library\n:rating [number], lsit words in your library with a certain rate\n:history [number], show your search history\n:clear, clear your oldest 100 history\n\nfor more information, browser http://mardict.appspot.com\n\"\"\"\n\nhelp_dict = \"\"\"\nhelp on dict:\n[usage] :dict word\n[intro] translate your word only use dict.cn api\n[eg] :dict hello\n\nmore on http://mardict.appspot.com/help/#dict\n\"\"\"\n\nhelp_google = \"\"\"\nhelp on google:\n[usage] :google word\n[intro] translate your word only use google api\n[eg] :google google is a bitch\n\nmore on http://mardict.appspot.com/help/#google\n\"\"\"\n\nhelp_lan2lan = \"\"\"\nhelp on lan2lan:\n[usage] :lan2lan word\n[intro] translate from one language to another language by google translation api\n[eg] :en2zh hello\n\nmore on http://mardict.appspot.com/help/#lan2lan\n\"\"\"\n\nhelp_history = \"\"\"\nhelp on history:\n[usage] :history (number)\n[intro] list your search history\n[eg] :history 9\n\nmore on http://mardict.appspot.com/help/#history\n\"\"\"\n\nhelp_clear = \"\"\"\nhelp on clear:\n[usage] :clear\n[intro] clear your search history\n\nmore on http://mardict.appspot.com/help/#clear\n\"\"\"\n\nhelp_add = \"\"\"\nhelp on add:\n[usage] :add (word)\n[intro] add the new word to your library(storing your unfamiliar word)\n[eg] :add hello\n\nmore on http://mardict.appspot.com/help/#add\n\"\"\"\n\nhelp_del = \"\"\"\nhelp on del:\n[usage] :del word\n[intro] delete the word from your library\n[eg] :del hello\n\nmore on http://mardict.appspot.com/help/#del\n\"\"\"\n\nhelp_list = \"\"\"\nhelp 
on list:\n[usage] :list (number)\n[intro] list a certain number of words from your library.\n[eg] :list 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#list\n\"\"\"\n\nhelp_rating = \"\"\"\nhelp on rating:\n[usage] :rating (number)\n[intro] list a certain number of words from your library with a certain rate.\n[eg] :rating 0 9\n\nthis function is very complex, browser the website.\n\nmore on http://mardict.appspot.com/help/#rating\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
<|reserved_special_token_0|>
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
solenoid1 = 23
solenoid2 = 24
solenoid3 = 4
solenoid4 = 17
motor1 = 18
led1 = 25
switch1 = 6
switch2 = 13
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
motor1pwm = GPIO.PWM(motor1, 100)
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
<|reserved_special_token_1|>
import RPi.GPIO as GPIO
import time
import timeit
import sys
import os
import random
import datetime
import collections
import threading
from Queue import Queue
solenoid1 = 23
solenoid2 = 24
solenoid3 = 4
solenoid4 = 17
motor1 = 18
led1 = 25
switch1 = 6
switch2 = 13
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
motor1pwm = GPIO.PWM(motor1, 100)
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
<|reserved_special_token_1|>
#!/usr/bin/env python
# Standardised set up
import RPi.GPIO as GPIO # External module imports GPIO
import time # Library to slow or give a rest to the script
import timeit # Alternative timing library for platform specific timing
import sys # Library to access program arguments and call exits
import os # Library provides functionality to clear screen
import random
import datetime
import collections
import threading
from Queue import Queue
# Pin definition using the Broadcom (BCM) numbering scheme.
# The trailing comments give the matching physical header pin for each
# BCM GPIO number.
solenoid1 = 23 # physical pin 16
solenoid2 = 24 # physical pin 18
solenoid3 = 4 # physical pin 07
solenoid4 = 17 # physical pin 11
motor1 = 18 # physical pin 12
led1 = 25 # physical pin 22
switch1 = 6 # physical pin 31
switch2 = 13 # physical pin 33
# Pin setup
GPIO.setmode(GPIO.BCM) # interpret pin numbers as BCM GPIO numbers
GPIO.setup(solenoid1, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid2, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid3, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid4, GPIO.OUT) # set as I/O output
GPIO.setup(led1, GPIO.OUT) # set as I/O output
GPIO.setup(motor1, GPIO.OUT) # set as I/O output
motor1pwm = GPIO.PWM(motor1,100) # software PWM object on motor1 at 100 Hz (not started here)
# Inputs with internal pull-ups: switches read HIGH until pulled low.
# NOTE(review): assumes the switches are wired to ground -- confirm on the board.
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
    """ANSI terminal escape codes for coloured/styled console output.

    Prefix text with a code and terminate with `end` to reset,
    e.g. colour.red + "warning" + colour.end.
    """
    purple = '\033[95m'
    cyan = '\033[96m'
    darkcyan = '\033[36m'
    blue = '\033[94m'
    green = '\033[92m'
    yellow = '\033[93m'
    red = '\033[91m'
    bold = '\033[1m'
    underline = '\033[4m'
    end = '\033[0m'  # reset all colours and styles
|
flexible
|
{
"blob_id": "4e9fd3ee2a78fae164d9f38704443ac5b2f4c11c",
"index": 1189,
"step-1": "<mask token>\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-2": "<mask token>\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\n<mask token>\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-3": "<mask token>\nsolenoid1 = 23\nsolenoid2 = 24\nsolenoid3 = 4\nsolenoid4 = 17\nmotor1 = 18\nled1 = 25\nswitch1 = 6\nswitch2 = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\nmotor1pwm = GPIO.PWM(motor1, 100)\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-4": "import RPi.GPIO as GPIO\nimport time\nimport timeit\nimport sys\nimport os\nimport random\nimport datetime\nimport collections\nimport threading\nfrom Queue import Queue\nsolenoid1 = 23\nsolenoid2 = 24\nsolenoid3 = 4\nsolenoid4 = 17\nmotor1 = 18\nled1 = 25\nswitch1 = 6\nswitch2 = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(solenoid1, GPIO.OUT)\nGPIO.setup(solenoid2, GPIO.OUT)\nGPIO.setup(solenoid3, GPIO.OUT)\nGPIO.setup(solenoid4, GPIO.OUT)\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(motor1, GPIO.OUT)\nmotor1pwm = GPIO.PWM(motor1, 100)\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n\nclass colour:\n purple = '\\x1b[95m'\n cyan = '\\x1b[96m'\n darkcyan = '\\x1b[36m'\n blue = '\\x1b[94m'\n green = '\\x1b[92m'\n yellow = '\\x1b[93m'\n red = '\\x1b[91m'\n bold = '\\x1b[1m'\n underline = '\\x1b[4m'\n end = '\\x1b[0m'\n",
"step-5": "#!/usr/bin/env python\n\n# Standardised set up\nimport RPi.GPIO as GPIO # External module imports GPIO\nimport time # Library to slow or give a rest to the script\nimport timeit # Alternative timing library for platform specific timing\nimport sys # Library to access program arguments and call exits\nimport os # Library provides functionality to clear screen\nimport random\nimport datetime\nimport collections\nimport threading\nfrom Queue import Queue\n\n# Pin definiton using Broadcom scheme\nsolenoid1 = 23 # GPIO 16\nsolenoid2 = 24 # GPIO 18\nsolenoid3 = 4 # GPIO 07\nsolenoid4 = 17 # GPIO 11\nmotor1 = 18 # GPIO 12\nled1 = 25 # GPIO 22\nswitch1 = 6 # GPIO 31\nswitch2 = 13 # GPIO 33\n\n# Pin setup\nGPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme\nGPIO.setup(solenoid1, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid2, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid3, GPIO.OUT) # set as I/O output\nGPIO.setup(solenoid4, GPIO.OUT) # set as I/O output\nGPIO.setup(led1, GPIO.OUT) # set as I/O output\nGPIO.setup(motor1, GPIO.OUT) # set as I/O output\nmotor1pwm = GPIO.PWM(motor1,100) # set pwm on motor1 pin\nGPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nGPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\nclass colour:\n purple = '\\033[95m'\n cyan = '\\033[96m'\n darkcyan = '\\033[36m'\n blue = '\\033[94m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n bold = '\\033[1m'\n underline = '\\033[4m'\n end = '\\033[0m'\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*
#Perso
from signalManipulation import *
from manipulateData import *
#Module
import pickle
from sklearn import svm, grid_search
from sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeClassifier
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, roc_auc_score
from sklearn.preprocessing import scale
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import StratifiedKFold
from copy import copy,deepcopy
import pylab as pl
#======================== TOOLS ========================
#======================================================
def writeResults(results, best_params, best_score, modelType, penalty, scoreType,
                 transformedData, scores=None):
    """
    Write results of a grid_search in a file

    [parameters] [score] [STD]
    ...
    [Confusion Matrix of the best model on train]
    [Confusion Matrix of the best model on test]

    Best Params : XXXX Score CV : XXX%
    Accuracy Train : XX Accuracy Test : XX
    F1 Train : XX F1 Test : XX

    Ex :
    1.3 0.91
    1.7 0.65

    [[9787    4]
    [ 399  520]]

    [[6690  276]
    [ 598   30]]

    Best Params : 1.3  Score CV : 0.91
    Accuracy Train : 0.91 Accuracy Test : 0.80
    F1 Train : 0.80 F1 Test : 0.50
    """
    # Each grid-search entry is (params_dict, mean_score, per-fold scores);
    # format one line per parameter combination: params, mean, std.
    strScores = ""
    if modelType == 'NonLinear':
        # RBF SVM: C and gamma.
        for model in results:
            print(model)
            strScores += "{:.4} {} {} {}\n".format(model[0]['C'], model[0]['gamma'], model[1], np.std(model[2]))
    elif modelType == 'ElasticNet':
        for model in results:
            print(model)
            strScores += "{:.4} {} {} {}\n".format(model[0]['alpha'], model[0]['l1_ratio'], model[1], np.std(model[2]))
    elif modelType == 'Pipe':
        # CSP + classifier pipeline: the classifier may expose C or alpha.
        for model in results:
            print(model)
            if 'classif__C' in model[0].keys():
                strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__C'], model[1], np.std(model[2]))
            else:
                strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__alpha'], model[1], np.std(model[2]))
    elif modelType == 'Ridge':
        for model in results:
            print(model)
            strScores += "{:.4} {} {}\n".format(model[0]['alpha'], model[1], np.std(model[2]))
    else: # Linear, C is the only parameter
        for model in results:
            print(model)
            strScores += "{:.4} {} {}\n".format(model[0]['C'], model[1], np.std(model[2]))

    strScores += "Best Params : {} Score CrossVal : {} \n".format(best_params, best_score)

    if scores:
        strScores += "{}\n{}\n".format(str(scores['cMatrixTrain']),
                                       str(scores['cMatrixTest']))
        strScores += "Accuracy Train : {} Accuracy Test : {} \n".format(scores['accTrain'], scores['accTest'])
        strScores += "F1 Train : {} F1 Test : {} \n".format(scores['f1Train'],
                                                            scores['f1Test'])
        strScores += "Roc_Auc Train : {} Roc_Auc Test : {} \n".format(scores['rocTrain'], scores['rocTest'])
    else:
        print("No Test file")
        strScores += "\nNo Test file\n=========\n"

    outName = "{}{}HyperSelection{}{}{}.txt".format(
        RESULTS_PATH, penalty, modelType.title(), scoreType.title(),
        transformedData.title())
    # `with` guarantees the handle is closed even if the write raises
    # (the original opened/closed manually and could leak the handle).
    with open(outName, 'w') as f:
        f.write(strScores)
def getScores(y, yPredTrain, yTest, yPredTest):
    """Collect train/test classification metrics into a single dict.

    Keys: f1/acc/roc for Train and Test, the two confusion matrices, and
    'random' -- the accuracy a constant majority-class predictor would get
    on the training labels.
    """
    scores = {
        'f1Train': f1_score(y, yPredTrain),
        'f1Test': f1_score(yTest, yPredTest),
        'accTrain': accuracy_score(y, yPredTrain),
        'accTest': accuracy_score(yTest, yPredTest),
        'rocTrain': roc_auc_score(y, yPredTrain),
        'rocTest': roc_auc_score(yTest, yPredTest),
        'cMatrixTrain': confusion_matrix(y, yPredTrain),
        'cMatrixTest': confusion_matrix(yTest, yPredTest),
    }
    # Share of positive labels; the majority-class baseline is whichever
    # of p and 1-p is larger.
    positiveShare = float(len(np.where(y == 1)[0])) / len(y)
    scores['random'] = max(positiveShare, 1 - positiveShare)
    return scores
def printScores(scores):
    """Format the metrics dict produced by getScores() as a readable
    report, print it, and return the string.
    """
    strSave = "Train :\n"
    strSave += "Accuracy : {}\n".format(scores['accTrain'])
    strSave += "Roc_Auc : {}\n".format(scores['rocTrain'])
    strSave += "F1 : {}\n".format(scores['f1Train'])
    strSave += "{}\n".format(scores['cMatrixTrain'])

    strSave += "Test :\n"
    strSave += "Accuracy : {}\n".format(scores['accTest'])
    strSave += "Roc_Auc : {}\n".format(scores['rocTest'])
    strSave += "F1 : {}\n".format(scores['f1Test'])
    strSave += "{}\n".format(scores['cMatrixTest'])

    strSave += "Random Accuracy : {}".format(scores['random'])
    # Single-argument print(x) behaves identically under Python 2
    # (parenthesised expression) and Python 3; the bare `print x`
    # statement the original used is Python-2-only syntax.
    print(strSave)
    return strSave
def testModel(best, X, y, xTest, yTest, penalty):
    """Predict the train and test sets with a fitted estimator, print the
    metric report, and return the scores dict from getScores().

    For an l1 model, also persist and plot the surviving coefficients.
    """
    print("Predicting Data :")
    yPredTrain = best.predict(X)
    yPredTest = best.predict(xTest)

    scores = getScores(y, yPredTrain, yTest, yPredTest)
    printScores(scores)

    if penalty == 'l1':
        # NOTE(review): relies on the module-level global `transformedData`
        # being set by the caller -- confirm, or thread it through as a
        # parameter.
        # saveNonZerosCoef() already calls analyzeCoef() itself; the extra
        # explicit analyzeCoef() call the original made here re-read the
        # file and displayed every plot a second time, so it was removed.
        saveNonZerosCoef(best, 'l1', dataType=transformedData)

    return scores
def saveNonZerosCoef(clf, reg, dataType):
    """Save the indices of the classifier's non-zero coefficients to
    'nonZerosParams<DataType><reg>' and plot their distribution.
    """
    nonZerosParams = np.where(clf.coef_ != 0)[0]
    print("Nombre de coef : ", len(clf.coef_[0]))
    # The original label said "annulés" (zeroed out) but the value printed
    # is the count of *non-zero* coefficients -- label fixed accordingly.
    print("Nombre de coef non nuls : ", len(nonZerosParams))
    with open('nonZerosParams{}{}'.format(dataType.title(), reg), 'w') as f:
        f.write(str(list(nonZerosParams)))

    analyzeCoef(dataType, reg)
def analyzeCoef(dataType, reg):
    """Plot how many non-zero coefficients survive per time step and per
    electrode, from the index list written by saveNonZerosCoef().

    Assumes the flattened feature vector is laid out as
    electrode*40 + step (64 electrodes x 40 time steps) -- TODO confirm
    against the feature construction code. Leftover debug prints from the
    original were removed.
    """
    path = "Images/Screenshots/"
    with open('nonZerosParams{}{}'.format(dataType.title(), reg), 'r') as f:
        wholeFile = f.read()

    # Strip the '[' and ']' wrapping the saved list literal.
    wholeFile = wholeFile[1:-1]

    # Count of surviving coefficients per time step (index mod 40).
    step = np.zeros(40)
    steps = np.array([i + 1 for i in range(40)])
    for num in map(int, wholeFile.split(',')):
        step[num % 40] += 1

    # Count of surviving coefficients per electrode (index // 40).
    elec = np.zeros(64)
    elecs = np.array([i + 1 for i in range(64)])
    for num in map(int, wholeFile.split(',')):
        elec[num // 40] += 1

    ax = plt.subplot()
    # Float literals force true division: under Python 2 (this script's
    # dialect, cf. the `print x` statements) the original integer forms
    # `np.array(steps)/60` and `width=1/60` floor to zero and flatten
    # the bar chart.
    steps = np.array(steps) / 60.0
    ax.bar(steps, step, width=1.0 / 60)
    ax.set_title("Nombre de coefficients non annulés par pas de temps")
    plt.savefig(path + 'nonZerosStep{}{}.png'.format(dataType.title(), reg))
    plt.show()

    ax = plt.subplot()
    ax.bar(elecs, elec, width=1)
    ax.set_title("Nombre de coefficients non annulés par electrode")
    plt.savefig(path + 'nonZerosElec{}{}.png'.format(dataType.title(), reg))
    plt.show()
#=============== Learner =============================
#====================================================
def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData, jobs=1):
    """
    Grid-search the C hyper-parameter of a linear SVM (LinearSVC),
    then report/write the results; predictions on the test set are only
    made when a test set is supplied.
    """
    # An empty test set means: do not refit on the whole train set and
    # do not predict.
    hasTest = np.size(xTest, 0) != 0

    # Candidate regularisation strengths.
    grid = {'C': np.logspace(-5, 1, 3)}

    # liblinear supports the dual formulation only with the l2 penalty.
    dual = penalty != 'l1'

    model = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)
    searcher = grid_search.GridSearchCV(model, grid, scoring=scoring, cv=5,
                                        n_jobs=jobs, verbose=3, refit=hasTest)
    print("Begin\n...")
    searcher.fit(X, y)

    print(searcher.best_params_, searcher.best_score_)
    if hasTest:
        scores = testModel(searcher.best_estimator_, X, y, xTest, yTest, penalty)
    else:
        print("No test, don't predict data")
        scores = None
    writeResults(searcher.grid_scores_, searcher.best_params_, searcher.best_score_,
                 'Linear', penalty, scoring, transformedData, scores=scores)
def learnHyperNonLinear(X, y, xTest, yTest, scoring, transformedData, jobs=1):
    """
    Grid-search C and gamma for a kernel SVM (svm.SVC), then report/write
    the results; test-set predictions only happen when a test set exists.
    """
    # An empty test set means: do not refit and do not predict.
    hasTest = np.size(xTest, 0) != 0

    # Same logarithmic span for both hyper-parameters.
    span = np.logspace(-5, 2, 8)
    grid = {'C': span, 'gamma': span}

    model = svm.SVC(class_weight=CLASS_WEIGHT)
    searcher = grid_search.GridSearchCV(model, grid, scoring=scoring, cv=5,
                                        n_jobs=jobs, verbose=3, refit=hasTest)
    print("Begin\n...")
    searcher.fit(X, y)

    print(searcher.best_params_, searcher.best_score_)
    if hasTest:
        scores = testModel(searcher.best_estimator_, X, y, xTest, yTest, 'l2')
    else:
        print("No test, don't predict data")
        scores = None
    writeResults(searcher.grid_scores_, searcher.best_params_, searcher.best_score_,
                 'NonLinear', 'l2', scoring, transformedData, scores=scores)
def learnRidge(X,y,xTest,yTest,scoring, transformedData, jobs):
    """Cross-validated hyper-parameter search for a RidgeClassifier.

    Searches alpha over a log grid with 10-fold CV.  When a test set is
    supplied, the best model is refit and evaluated; results are written to
    disk through writeResults (penalty is reported as 'l2').
    """
    # An empty test matrix means: search only, no refit / prediction.
    testAvailable = np.size(xTest, 0) != 0
    param_grid = {'alpha': np.logspace(-3, 3, 6)}
    searcher = grid_search.GridSearchCV(RidgeClassifier(class_weight=CLASS_WEIGHT),
                                        param_grid, scoring=scoring, cv=10,
                                        n_jobs=jobs, verbose=3,
                                        refit=testAvailable)
    print("Begin\n...")
    searcher.fit(X, y)
    print(searcher.best_params_, searcher.best_score_)
    if testAvailable:
        scores = testModel(searcher.best_estimator_, X, y, xTest, yTest, 'l2')
    else:
        print("No test, don't predict data")
        scores = None
    writeResults(searcher.grid_scores_, searcher.best_params_, searcher.best_score_,
                 'Ridge', 'l2', scoring, transformedData, scores=scores)
def learnRandomForest(X,y,xTest,yTest,scoring, jobs):
    # Grid-search a random forest over size/shape hyper-parameters with
    # 3-fold CV, then print train/test scores.
    # NOTE(review): unlike the other learners, this one predicts on xTest
    # unconditionally -- it will fail if xTest is empty; confirm callers
    # always pass a test split.  Nothing is written to disk here.
    params = {
        'n_estimators':[2,10,100],
        'max_features':['auto',2,10],
        'max_depth':[10,40,2],
        'min_samples_split':[2,10,20,50]
    }
    forest = RandomForestClassifier()
    grd = grid_search.GridSearchCV(forest,params, scoring=scoring,cv=3,n_jobs=jobs,verbose=3)
    grd.fit(X,y)
    yPredTrain = grd.predict(X)
    yPredTest = grd.predict(xTest)
    # Python 2 print statement (the rest of this file mixes it with print()).
    print "FOREST : \n"
    scores = getScores(y, yPredTrain, yTest, yPredTest)
    printScores(scores)
def learnCspPipeline(X, y, xTest, yTest, scoring,transformedData,jobs=1, classifier='lin'):
    """Grid Search over a CSP + linear-classifier pipeline.

    classifier='lin' uses a LinearSVC (hyper-parameter C); anything else uses
    a RidgeClassifier (hyper-parameter alpha).  The number of CSP components
    is searched jointly with the classifier hyper-parameter; results are
    written to disk through writeResults.
    """
    # An empty test matrix means: search only, no prediction.
    testAvailable = np.size(xTest)
    X = vecToMat(X)
    if testAvailable:
        xTest = vecToMat(xTest)
    if classifier=='lin':
        classif = svm.LinearSVC(penalty='l2',class_weight=CLASS_WEIGHT)
        params = np.logspace(-5,1,3)
        hyper = 'classif__C'
    else:
        classif = RidgeClassifier(class_weight=CLASS_WEIGHT)
        params = np.logspace(-1,3,10)
        hyper = 'classif__alpha'
    csp = CSP(reg='ledoit_wolf',log=False)
    # BUG FIX: a first Pipeline including a StandardScaler was built and then
    # immediately overwritten by the scaler-less one below; the dead
    # construction (and the unused scaler) has been removed.
    pipe = Pipeline(steps = [('csp',csp), ('classif',classif)])
    n_components = [1,2,5,10,20,30,40,50]
    dico = {'csp__n_components':n_components, hyper:params}
    # BUG FIX: honour the `scoring` and `jobs` arguments -- previously
    # n_jobs was hard-coded to 4 and scoring was left at its default.
    grd = grid_search.GridSearchCV(pipe,dico, scoring=scoring, cv=5, verbose=3, n_jobs=jobs)
    grd.fit(X,y)
    if testAvailable:
        scores = testModel(grd.best_estimator_,X,y,xTest,yTest,'l2')
        writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=scores)
    else:
        print("No test, don't predict data")
        writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=None)
def learnElasticNet(X,y,xTest,yTest,scoring,transformedData='raw',jobs=1):
    """Grid Search over alpha / l1_ratio for an ElasticNet regressor used as
    a binary classifier (predictions are thresholded at 0 to +/-1 labels).

    When a test set is provided, train/test scores are computed and printed,
    results are written to disk, and the surviving (non-zero) coefficients
    are dumped to 'nonZerosParamsRawElasticNet'.
    """
    # Parameters selection
    #====================
    alpha = np.linspace(0.01,0.2,5)
    l1_ratio = np.linspace(0.01,0.3,5)
    parameters = {'alpha': alpha, 'l1_ratio': l1_ratio}
    #Creating Model and begin classification
    #=======================================
    classif = ElasticNet(selection='random')
    clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3)
    print("Begin\n...")
    clf.fit(X,y)
    #Get results, print and write them into a file
    #============================================
    best = clf.best_estimator_
    print(clf.best_params_, clf.best_score_)
    # BUG FIX: the test-availability check referenced an undefined name `a`
    # (guaranteed NameError); it must inspect xTest like the other learners.
    if np.size(xTest,0)!=0:
        print("Predicting Data :")
        # ElasticNet is a regressor: threshold its output at 0 to get labels.
        yPredTrain = best.predict(X)
        yPredTrain[yPredTrain >= 0] = 1
        yPredTrain[yPredTrain < 0] = -1
        yPredTest = best.predict(xTest)
        yPredTest[yPredTest >= 0] = 1
        yPredTest[yPredTest < 0] = -1
        scores = getScores(y, yPredTrain, yTest, yPredTest)
        printScores(scores)
        writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\
                     'ElasticNet', 'l1l2', scoring, transformedData, scores)
        # Record which coefficients survived the l1 part of the penalty.
        nonZerosParams = np.where(best.coef_ != 0)[0]
        print(len(nonZerosParams))
        print(nonZerosParams)
        with open('nonZerosParamsRawElasticNet', 'w') as f:
            f.write(str(list(nonZerosParams)))
def learnStep(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
    # Greedy backward elimination over time steps: repeatedly remove the time
    # step whose removal yields the best held-out score, stopping once the
    # score has failed to improve more than 3 times.
    # NOTE(review): `penalty` is accepted but never used -- the base model is
    # always l2-regularised; confirm this is intentional.
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    cRange = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1, 10]
    parameters = {'C': cRange}
    best_score = 0
    # Feature layout assumed to be 64 electrodes x numStep time steps per
    # sample -- TODO confirm against delTimeStep.
    numStep = np.size(X,1)//64
    keptStep = np.ones(numStep, dtype=bool)  # True = step still in the model
    copyX = copy(X)          # pristine copies, restored after each candidate
    copyXTest = copy(xTest)
    scores = np.zeros(numStep)
    scoreDecrease = False
    numFailed = 0
    while not scoreDecrease:
        scores[:] = 0
        for step in range(numStep):
            if not keptStep[step] :
                continue
            else:
                # Remove every already-eliminated step plus the candidate.
                erased = list(np.where(keptStep==False)[0])
                if erased != []:
                    erased.append(step)
                    X = delTimeStep(X, erased, transformedData)
                    xTest = delTimeStep(xTest, erased, transformedData)
                else:
                    X = delTimeStep(X,step, transformedData)
                    xTest = delTimeStep(xTest, step, transformedData)
            print("Learning Model without step N°",step)
            clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring,\
                                           cv=5, n_jobs=jobs, verbose=3)
            clf.fit(X,y)
            best = clf.best_estimator_
            print(clf.best_params_, clf.best_score_)
            yPredTest = best.predict(xTest)
            # Score the candidate removal on the held-out set.
            if scoring=='f1':
                scores[step] = f1_score(yTest, yPredTest)
            else:
                scores[step] = roc_auc_score(yTest, yPredTest)
            print("Score :", scores[step])
            #post process : restore untouched matrices for the next candidate
            X = copy(copyX)
            xTest = copy(copyXTest)
        # Highest score without a step = least useful step; drop it for good.
        worstStep = np.argmax(scores)
        keptStep[worstStep] = False
        print("Score max : {}, removing step N°{}".format(scores[worstStep], worstStep))
        print("Step removed : ", np.where(keptStep==False))
        print("Past Best : ", best_score)
        if scores[worstStep] > best_score:
            best_score = scores[worstStep]
        else:
            numFailed += 1
        if numFailed > 3:
            scoreDecrease = True
def learnElecFaster(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
    # Greedy backward elimination over the 64 electrodes, scored by CV only
    # (train and test splits are merged first).  Each of the 63 iterations
    # removes the electrode whose removal yields the highest cross-validated
    # score and appends a progress line to selecStep.txt.
    # NOTE(review): `penalty` is accepted but never used (model is always l2).
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    cRange = np.logspace(-5,2,8)
    parameters = {'C': cRange}
    # Selection relies on CV alone, so the test split can be folded in.
    if np.size(xTest)!=0:
        X = np.concatenate((X,xTest))
        y = np.concatenate((y,yTest))
    # clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3)
    # clf.fit(X,y)
    # bestParams = clf.best_params_
    # print(bestParams['C'], clf.best_score_)
    # C = bestParams['C']
    # C is fixed instead of searched (the grid search above is commented out).
    C = 1e-5
    baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
    best_score = 0
    best_selection = []
    keptElec = np.ones(64, dtype=bool)  # True = electrode still in the model
    copyX = copy(X)      # pristine copy, restored after each candidate fit
    scores = np.zeros(64)
    scoreDecrease = False  # NOTE(review): set but never read in this function
    numFailed = 0
    for numIter in range(63):
        scores[:] = 0
        for elec in range(64):
            if not keptElec[elec] :
                #Already deleted
                continue
            else:
                print("Deleting Electrode(s) ...")
                # Remove every already-eliminated electrode plus the candidate.
                erased = list(np.where(keptElec==False)[0])
                if erased != []:
                    erased.append(elec)
                    X = delElec(X, erased, transformedData)
                else:
                    X = delElec(X,elec, transformedData)
            print("Learning Model without elec N°",elec)
            clf = grid_search.GridSearchCV(baseClf, {'C':[C]}, scoring=scoring, cv=10, n_jobs=jobs, verbose=1)
            clf.fit(X,y)
            scores[elec] = clf.best_score_
            print(scores[elec])
            #post process : restore the untouched matrix for the next candidate
            X = copy(copyX)
        # Highest CV score without an electrode = least useful one; drop it.
        worstElec = np.argmax(scores)
        keptElec[worstElec] = False
        removedElec = np.where(keptElec==False)
        print("Score max : {}, removing elec N°{}".format(scores[worstElec], worstElec))
        print("Elec removed : ", removedElec)
        print("Past Best : ", best_score, "with : ", best_selection)
        if scores[worstElec] > best_score:
            best_score = scores[worstElec]
            best_selection = np.where(keptElec==False)
        else:
            numFailed += 1
        with open("selecStep.txt",'a') as f:
            f.write("{} : {} with elec {}, numFailed : {}\n".format(numIter, scores[worstElec], removedElec, numFailed))
|
normal
|
{
"blob_id": "d8e8ecbf77828e875082abf8dcbfbc2c29564e20",
"index": 4892,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*\n#Perso\nfrom signalManipulation import *\nfrom manipulateData import *\n\n#Module\nimport pickle\n\nfrom sklearn import svm, grid_search\nfrom sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeClassifier\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score, roc_auc_score\nfrom sklearn.preprocessing import scale\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.cross_validation import StratifiedKFold\n\nfrom copy import copy,deepcopy\n\nimport pylab as pl\n\n#======================== TOOLS ========================\n#======================================================\ndef writeResults(results, best_params, best_score, modelType, penalty, scoreType,\\\n transformedData, scores=None):\n \"\"\"\n Write results of a grid_search in a file\n [parameters] [score] [STD]\n ...\n [Confusion Matrix of the best model on train]\n [Confusion Matrix of the best model on test]\n Best Params : XXXX Score CV : XXX%\n Accuracy Train : XX Accuracy Test : XX\n F1 Train : XX F1 Test : XX\n\n Ex :\n\n 1.3 0.91\n 1.7 0.65\n [[9787 4]\n [ 399 520]]\n [[6690 276]\n [ 598 30]]\n Best Params : 1.3 Score CV : 0.91\n Accuracy Train : 0.91 Accuracy Test : 0.80\n F1 Train : 0.80 F1 Test : 0.50\n \"\"\"\n\n strScores = \"\"\n\n if modelType=='NonLinear':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {} {}\\n\".format(model[0]['C'], model[0]['gamma'], model[1], np.std(model[2]))\n elif modelType=='ElasticNet':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {} {}\\n\".format(model[0]['alpha'], model[0]['l1_ratio'], model[1], np.std(model[2]))\n\n elif modelType=='Pipe':\n for model in results:\n print(model)\n if 'classif__C' in model[0].keys():\n strScores += \"{} {:.4} {} {}\\n\".format(model[0]['csp__n_components'], model[0]['classif__C'], model[1], np.std(model[2]))\n else:\n strScores += \"{} {:.4} {} 
{}\\n\".format(model[0]['csp__n_components'], model[0]['classif__alpha'], model[1], np.std(model[2]))\n\n elif modelType=='Ridge':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {}\\n\".format(model[0]['alpha'], model[1], np.std(model[2]))\n\n \n else: #Linear, C is the only parameter\n for model in results:\n print(model)\n strScores += \"{:.4} {} {}\\n\".format(model[0]['C'], model[1], np.std(model[2]))\n \n\n strScores += \"Best Params : {} Score CrossVal : {} \\n\".format(best_params, best_score)\n\n if scores:\n strScores += \"{}\\n{}\\n\".format(str(scores['cMatrixTrain']),\\\n str(scores['cMatrixTest']))\n\n strScores += \"Accuracy Train : {} Accuracy Test : {} \\n\".format(scores['accTrain'], scores['accTest'])\n strScores += \"F1 Train : {} F1 Test : {} \\n\".format(scores['f1Train'],\\\n scores['f1Test'])\n strScores += \"Roc_Auc Train : {} Roc_Auc Test : {} \\n\".format(scores['rocTrain'],scores['rocTest'])\n else:\n print(\"No Test file\")\n strScores += \"\\nNo Test file\\n=========\\n\"\n \n f = open(\"{}{}HyperSelection{}{}{}.txt\".format(RESULTS_PATH, penalty, modelType.title(), scoreType.title(), transformedData.title()), 'w')\n f.write(strScores)\n f.close()\n\ndef getScores(y, yPredTrain, yTest, yPredTest):\n\n scores = dict()\n\n scores['f1Train'] = f1_score(y, yPredTrain)\n scores['f1Test'] = f1_score(yTest, yPredTest)\n\n\n scores['accTrain'] = accuracy_score(y, yPredTrain)\n scores['accTest'] = accuracy_score(yTest, yPredTest)\n \n\n scores['rocTrain'] = roc_auc_score(y, yPredTrain)\n scores['rocTest'] = roc_auc_score(yTest, yPredTest)\n \n\n scores['cMatrixTrain'] = confusion_matrix(y, yPredTrain)\n scores['cMatrixTest'] = confusion_matrix(yTest, yPredTest)\n\n proba = float(len(np.where(y==1)[0]))/len(y)\n if proba < 0.50:\n proba = 1 - proba\n scores['random'] = proba\n \n return scores\n\ndef printScores(scores):\n\n strSave = \"Train :\\n\"\n strSave += \"Accuracy : {}\\n\".format(scores['accTrain'])\n strSave += 
\"Roc_Auc : {}\\n\".format(scores['rocTrain'])\n strSave += \"F1 : {}\\n\".format(scores['f1Train'])\n strSave += \"{}\\n\".format(scores['cMatrixTrain'])\n\n strSave += \"Test :\\n\"\n strSave += \"Accuracy : {}\\n\".format(scores['accTest'])\n strSave += \"Roc_Auc : {}\\n\".format(scores['rocTest'])\n strSave += \"F1 : {}\\n\".format(scores['f1Test'])\n strSave += \"{}\\n\".format(scores['cMatrixTest'])\n\n strSave += \"Random Accuracy : {}\".format(scores['random'])\n\n \n print strSave\n return strSave\n\n\ndef testModel(best,X,y,xTest,yTest,penalty):\n \n print(\"Predicting Data :\")\n yPredTrain = best.predict(X)\n yPredTest = best.predict(xTest)\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n printScores(scores)\n\n if penalty=='l1':\n saveNonZerosCoef(best, 'l1', dataType=transformedData)\n analyzeCoef(dataType=transformedData, reg='l1')\n\n return scores\n\n\ndef saveNonZerosCoef(clf, reg, dataType):\n\n nonZerosParams = np.where(clf.coef_ != 0)[0]\n print(\"Nombre de coef : \", len(clf.coef_[0]))\n print(\"Nombre de coef annulés : \", len(nonZerosParams))\n\n with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'w') as f:\n f.write(str(list(nonZerosParams)))\n\n analyzeCoef(dataType, reg)\n\n\ndef analyzeCoef(dataType, reg):\n\n path = \"Images/Screenshots/\"\n \n with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'r') as f:\n wholeFile = f.read()\n print(\"Here\")\n print(wholeFile[0], wholeFile[-1])\n wholeFile = wholeFile[1:-1]\n numGen = map(int,wholeFile.split(','))\n\n #Step\n step = np.zeros(40)\n steps = np.array([i+1 for i in range(40)])\n for num in numGen:\n step[num%40] += 1\n\n numGen = map(int,wholeFile.split(','))\n\n #Elec\n elec = np.zeros(64)\n elecs = np.array([i+1 for i in range(64)])\n\n for num in numGen:\n elec[num//40] += 1\n\n ax = plt.subplot()\n\n steps = np.array(steps)/60\n \n ax.bar(steps, step, width=1/60)\n ax.set_title(\"Nombre de coefficients non annulés par pas de temps\")\n 
plt.savefig(path+'nonZerosStep{}{}.png'.format(dataType.title(),reg))\n\n plt.show()\n \n ax = plt.subplot()\n ax.bar(elecs, elec, width=1)\n ax.set_title(\"Nombre de coefficients non annulés par electrode\")\n plt.savefig(path+'nonZerosElec{}{}.png'.format(dataType.title(),reg))\n plt.show()\n\n#=============== Learner =============================\n#====================================================\ndef learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n \"\"\"\n Grid Search over a set of parameters for linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)\n \n\n\ndef learnHyperNonLinear(X, y, xTest, yTest, scoring, transformedData,jobs=1):\n \"\"\"\n Grid Search over a set of parameters for a non-linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n \n\n # Parameters 
selection\n #====================\n cRange = np.logspace(-5,2,8)\n gRange = np.logspace(-5,2,8)\n parameters = {'C': cRange, 'gamma':gRange}\n \n #Creating Model and begin classification\n #=======================================\n classif = svm.SVC(class_weight=CLASS_WEIGHT)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3,refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n \n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'NonLinear', 'l2', scoring, transformedData, scores=scores)\n\n \n else:\n print(\"No test, don't predict data\")\n \n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'NonLinear', 'l2', scoring, transformedData, scores=None)\n\ndef learnRidge(X,y,xTest,yTest,scoring, transformedData, jobs):\n \"\"\"\n Grid Search over a set of parameters for linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n alpha = np.logspace(-3,3,6)\n parameters = {'alpha': alpha}\n\n #Creating Model and begin classification\n #=======================================\n classif = RidgeClassifier(class_weight=CLASS_WEIGHT)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=10, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\\\n 'l2',scoring, transformedData, 
scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\\\n 'l2',scoring, transformedData, scores=None)\n \n\ndef learnRandomForest(X,y,xTest,yTest,scoring, jobs):\n\n params = {\n 'n_estimators':[2,10,100],\n 'max_features':['auto',2,10],\n 'max_depth':[10,40,2],\n 'min_samples_split':[2,10,20,50]\n }\n \n forest = RandomForestClassifier()\n\n grd = grid_search.GridSearchCV(forest,params, scoring=scoring,cv=3,n_jobs=jobs,verbose=3)\n grd.fit(X,y)\n\n yPredTrain = grd.predict(X)\n yPredTest = grd.predict(xTest)\n\n print \"FOREST : \\n\"\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n printScores(scores)\n\n\ndef learnCspPipeline(X, y, xTest, yTest, scoring,transformedData,jobs=1, classifier='lin'):\n\n testAvailable = np.size(xTest)\n \n X = vecToMat(X)\n\n if testAvailable:\n xTest = vecToMat(xTest)\n\n if classifier=='lin':\n classif = svm.LinearSVC(penalty='l2',class_weight=CLASS_WEIGHT)\n params = np.logspace(-5,1,3)\n hyper = 'classif__C'\n\n else:\n classif = RidgeClassifier(class_weight=CLASS_WEIGHT)\n params = np.logspace(-1,3,10)\n hyper = 'classif__alpha'\n\n csp = CSP(reg='ledoit_wolf',log=False)\n scaler = StandardScaler()\n pipe = Pipeline(steps = [('csp',csp), ('scaler',scaler), ('classif',classif)])\n pipe = Pipeline(steps = [('csp',csp), ('classif',classif)])\n\n n_components = [1,2,5,10,20,30,40,50]\n dico = {'csp__n_components':n_components, hyper:params}\n\n grd = grid_search.GridSearchCV(pipe,dico, cv=5, verbose=3, n_jobs=4)\n grd.fit(X,y)\n\n \n if testAvailable:\n scores = testModel(grd.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=scores)\n\n else:\n print(\"No test, don't predict data\") \n writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=None)\n\n\n \ndef 
learnElasticNet(X,y,xTest,yTest,scoring,transformedData='raw',jobs=1):\n\n # Parameters selection\n #====================\n alpha = np.linspace(0.01,0.2,5)\n l1_ratio = np.linspace(0.01,0.3,5)\n parameters = {'alpha': alpha, 'l1_ratio': l1_ratio}\n \n #Creating Model and begin classification\n #=======================================\n classif = ElasticNet(selection='random')\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3)\n\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n best = clf.best_estimator_\n print(clf.best_params_, clf.best_score_)\n\n if np.size(a,0)!=0:\n print(\"Predicting Data :\")\n yPredTrain = best.predict(X)\n yPredTrain[yPredTrain >= 0] = 1\n yPredTrain[yPredTrain < 0] = -1\n\n yPredTest = best.predict(xTest)\n yPredTest[yPredTest >= 0] = 1\n yPredTest[yPredTest < 0] = -1\n\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n printScores(scores)\n \n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'ElasticNet', 'l1l2', scoring, transformedData, scores)\n \n nonZerosParams = np.where(best.coef_ != 0)[0]\n print(len(nonZerosParams))\n print(nonZerosParams)\n\n with open('nonZerosParamsRawElasticNet', 'w') as f:\n f.write(str(list(nonZerosParams)))\n\ndef learnStep(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n\n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n cRange = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1, 10]\n parameters = {'C': cRange}\n\n best_score = 0\n numStep = np.size(X,1)//64\n keptStep = np.ones(numStep, dtype=bool)\n copyX = copy(X)\n copyXTest = copy(xTest)\n\n scores = np.zeros(numStep)\n scoreDecrease = False\n numFailed = 0\n \n while not scoreDecrease:\n\n scores[:] = 0\n\n for step in range(numStep):\n if not keptStep[step] :\n continue\n else:\n erased = list(np.where(keptStep==False)[0])\n \n if erased != []:\n 
erased.append(step)\n X = delTimeStep(X, erased, transformedData)\n xTest = delTimeStep(xTest, erased, transformedData)\n else:\n X = delTimeStep(X,step, transformedData)\n xTest = delTimeStep(xTest, step, transformedData)\n\n print(\"Learning Model without step N°\",step)\n\n clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring,\\\n cv=5, n_jobs=jobs, verbose=3)\n clf.fit(X,y)\n\n best = clf.best_estimator_\n print(clf.best_params_, clf.best_score_)\n\n yPredTest = best.predict(xTest)\n\n\n if scoring=='f1':\n scores[step] = f1_score(yTest, yPredTest)\n else:\n scores[step] = roc_auc_score(yTest, yPredTest)\n\n\n print(\"Score :\", scores[step])\n\n #post process :\n X = copy(copyX)\n xTest = copy(copyXTest)\n \n worstStep = np.argmax(scores)\n keptStep[worstStep] = False\n\n print(\"Score max : {}, removing step N°{}\".format(scores[worstStep], worstStep))\n print(\"Step removed : \", np.where(keptStep==False))\n print(\"Past Best : \", best_score)\n\n if scores[worstStep] > best_score:\n best_score = scores[worstStep]\n else:\n numFailed += 1\n \n if numFailed > 3:\n scoreDecrease = True\n\ndef learnElecFaster(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n \n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n cRange = np.logspace(-5,2,8)\n \n parameters = {'C': cRange}\n\n if np.size(xTest)!=0:\n X = np.concatenate((X,xTest))\n y = np.concatenate((y,yTest))\n \n # clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3)\n # clf.fit(X,y)\n # bestParams = clf.best_params_\n # print(bestParams['C'], clf.best_score_)\n\n # C = bestParams['C']\n C = 1e-5\n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n\n best_score = 0\n best_selection = []\n keptElec = np.ones(64, dtype=bool)\n\n copyX = copy(X)\n \n scores = np.zeros(64)\n scoreDecrease = False\n numFailed = 0\n \n for numIter in range(63):\n\n scores[:] = 0\n\n for elec in range(64):\n if not 
keptElec[elec] :\n #Already deleted\n continue\n else:\n\n print(\"Deleting Electrode(s) ...\")\n erased = list(np.where(keptElec==False)[0]) \n if erased != []:\n erased.append(elec)\n X = delElec(X, erased, transformedData)\n else:\n X = delElec(X,elec, transformedData)\n\n print(\"Learning Model without elec N°\",elec)\n\n clf = grid_search.GridSearchCV(baseClf, {'C':[C]}, scoring=scoring, cv=10, n_jobs=jobs, verbose=1)\n clf.fit(X,y)\n \n scores[elec] = clf.best_score_\n\n print(scores[elec])\n \n #post process :\n X = copy(copyX)\n \n worstElec = np.argmax(scores)\n keptElec[worstElec] = False\n removedElec = np.where(keptElec==False)\n print(\"Score max : {}, removing elec N°{}\".format(scores[worstElec], worstElec))\n print(\"Elec removed : \", removedElec)\n \n print(\"Past Best : \", best_score, \"with : \", best_selection)\n\n if scores[worstElec] > best_score:\n best_score = scores[worstElec]\n best_selection = np.where(keptElec==False)\n\n else:\n numFailed += 1\n\n with open(\"selecStep.txt\",'a') as f:\n f.write(\"{} : {} with elec {}, numFailed : {}\\n\".format(numIter, scores[worstElec], removedElec, numFailed))\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pygame, states, events
from settings import all as settings
import gui
def handleInput(world, event):
    """Handle an input event on the performance screen.

    Select/Escape backs out via bwd; any event whose code is a multiple of
    ten also plays the 'uiaction' sound.
    """
    if event in (events.btnSelectOn, events.btnEscapeOn):
        bwd(world)
    if event % 10 == 0:
        world.sounds['uiaction'].play(0)
def bwd(world):
    # Back out of the current screen to the intro state.
    # NOTE(review): `left` is not defined or imported anywhere in this module,
    # so reaching this branch (world.state >= states.Config) would raise
    # NameError -- confirm whether `left` should come from a config module or
    # whether this branch is unreachable from the Perf screen.
    if world.state >= states.Config:
        return left(world)
    world.shouldRedraw = True
    world.state = states.Intro
def draw(world):
    """Render the performance screen into world.worldsurf.

    Lays out world.getperf() rows in two columns: the first 15 rows on the
    left (the first 5 highlighted in pink), the rest on the right.  Skips
    rendering entirely unless world.shouldRedraw is set, and clears the flag
    after drawing.
    """
    if not world.shouldRedraw:
        return
    r = world.worldsurf_rect
    world.worldsurf.fill(world.bg_color)
    perfdata = world.getperf()
    # Hoisted out of the loop (it was re-assigned on every iteration); rows at
    # this index and beyond move to the right-hand column.
    separate_point = 15
    for i, line in enumerate(perfdata):
        if i < separate_point:
            if i < 5:
                # First five rows are highlighted in pink.
                gui.simpleText(world, line, r.centerx - 350, (i + 1) * 30, alignment="midleft", color=(239,145,242))
            else:
                gui.simpleText(world, line, r.centerx - 350, (i + 1) * 30, alignment="midleft")
        else:
            gui.simpleText(world, line, r.centerx - 50, (i - separate_point + 1) * 30, alignment="midleft")
    world.shouldRedraw = False
def enter(world):
    """Enter the performance screen: reset the config cursor, switch the
    world into the Perf state and request a redraw."""
    world.configCatX, world.configOptX = 0, -1
    world.state = states.Perf
    world.shouldRedraw = True
|
normal
|
{
"blob_id": "8650e0f1e7f2ac42c3c78191f79810f5befc9f41",
"index": 3298,
"step-1": "<mask token>\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\ndef enter(world):\n world.state = states.Perf\n world.configCatX = 0\n world.configOptX = -1\n world.shouldRedraw = True\n",
"step-4": "import pygame, states, events\nfrom settings import all as settings\nimport gui\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\ndef enter(world):\n world.state = states.Perf\n world.configCatX = 0\n world.configOptX = -1\n world.shouldRedraw = True\n",
"step-5": "import pygame, states, events\r\nfrom settings import all as settings\r\n\r\nimport gui\r\n\r\ndef handleInput(world, event):\r\n if event == events.btnSelectOn or event == events.btnEscapeOn:\r\n bwd(world)\r\n\r\n if event%10 == 0:\r\n world.sounds['uiaction'].play(0)\r\n # world.shouldRedraw = True\r\n\r\n\r\ndef bwd(world):\r\n if world.state >= states.Config:\r\n return left(world)\r\n\r\n world.shouldRedraw = True\r\n world.state = states.Intro\r\n\r\n\r\ndef draw( world ):\r\n if not world.shouldRedraw:\r\n return\r\n\r\n r = world.worldsurf_rect\r\n world.worldsurf.fill(world.bg_color)\r\n\r\n perfdata = world.getperf()\r\n for i in range(0,len(perfdata)):\r\n separate_point = 15\r\n if(i<separate_point):\r\n if(i<5):\r\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) * 30, alignment=\"midleft\", color=(239,145,242))\r\n else:\r\n gui.simpleText(world, perfdata[i], r.centerx-350, (i+1)*30,alignment=\"midleft\")\r\n else:\r\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i-separate_point + 1) * 30, alignment=\"midleft\")\r\n world.shouldRedraw = False\r\n\r\ndef enter(world):\r\n world.state = states.Perf\r\n world.configCatX = 0\r\n world.configOptX = -1\r\n world.shouldRedraw = True\r\n\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import json
from constants import *
from coattention_layer import *
from prepare_generator import *
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
def Train(dataset=True):
train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(
dataset)
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True,
verbose=1)
scheduler_lr = LearningRateScheduler(scheduler, verbose=0)
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
# Save the weights using the `checkpoint_path` format
model.save_weights(CHECKPOINT_PATH +
'/cp-{epoch: 04d}.ckpt'.format(epoch=0))
history = model.fit(x=train_generator,
epochs=EPOCHS,
validation_data=val_generator,
callbacks=[checkpoint, earlystop_callback],
workers=6,
use_multiprocessing=True)
# save history
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
# prediction
predictions = model.predict(val_generator,
workers=6,
use_multiprocessing=True,
verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
def save_config(dataset):
if dataset == 0:
DATASET = 'English'
if dataset == 1:
DATASET = 'Google'
if dataset == 2:
DATASET = 'Targoman'
config = {'NAME': 'coattention',
'EMBEDDING': 'keras',
"DATASET": DATASET,
"OPTIMIZER": 'Adam',
"EARLY STOPPING": 'val_loss',
"LOSS": 'categorical_crossentropy',
'DROPOUT_RATE': DROPOUT_RATE,
"EMBEDDING_DIM": EMBEDDING_DIM,
"EPOCHS": EPOCHS,
"BATCH_SIZE": BATCH_SIZE,
"SEQ_LENGTH": SEQ_LENGTH,
"NUM_CLASSES": NUM_CLASSES}
print("save config in" + str(CONFIG_PATH))
with open(CONFIG_PATH, 'w') as file:
json.dump(config, file)
return
Train(dataset=2)
|
normal
|
{
"blob_id": "a8d52d81ef6538e9cb8a0a9cab7cd0a778454c8e",
"index": 6424,
"step-1": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 
'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 
'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-4": "import json\nfrom constants import *\nfrom coattention_layer import *\nfrom prepare_generator import *\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n 
json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-5": "import json\r\nfrom constants import *\r\nfrom coattention_layer import *\r\nfrom prepare_generator import *\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\r\n\r\n\r\ndef coattention(num_embeddings):\r\n image_input = Input(shape=(196, 512))\r\n question_input = Input(shape=(SEQ_LENGTH,))\r\n\r\n output = CoattentionModel(num_embeddings)(question_input, image_input)\r\n\r\n model = Model(inputs=[question_input, image_input], outputs=output)\r\n\r\n return model\r\n\r\n\r\ndef scheduler(epoch):\r\n if epoch < 10:\r\n return 0.0001\r\n else:\r\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\r\n\r\n\r\ndef Train(dataset=True):\r\n\r\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(\r\n dataset)\r\n\r\n save_config(dataset)\r\n\r\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\r\n save_weights_only=True,\r\n verbose=1)\r\n\r\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\r\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\r\n\r\n model = coattention(VOCAB_SIZE)\r\n\r\n model.compile(optimizer=Adam(learning_rate=LR),\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n model.summary()\r\n\r\n # Save the weights using the `checkpoint_path` format\r\n model.save_weights(CHECKPOINT_PATH +\r\n '/cp-{epoch: 04d}.ckpt'.format(epoch=0))\r\n\r\n history = model.fit(x=train_generator,\r\n epochs=EPOCHS,\r\n validation_data=val_generator,\r\n callbacks=[checkpoint, earlystop_callback],\r\n workers=6,\r\n use_multiprocessing=True)\r\n\r\n # save history\r\n with open(HISTORY_PATH, 'w') as file:\r\n json.dump(history.history, file)\r\n\r\n # prediction\r\n predictions = model.predict(val_generator,\r\n workers=6,\r\n use_multiprocessing=True,\r\n verbose=1)\r\n\r\n ans_vocab = 
load_ans_vocab()\r\n\r\n result = []\r\n for q in range(len(val_question_ids)):\r\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\r\n q_id = int(val_question_ids[q])\r\n result.append({u'answer': ans, u'question_id': q_id})\r\n\r\n with open(PRED_PATH, 'w') as file:\r\n json.dump(list(result), file)\r\n\r\n return\r\n\r\n\r\ndef save_config(dataset):\r\n if dataset == 0:\r\n DATASET = 'English'\r\n if dataset == 1:\r\n DATASET = 'Google'\r\n if dataset == 2:\r\n DATASET = 'Targoman'\r\n\r\n config = {'NAME': 'coattention',\r\n 'EMBEDDING': 'keras',\r\n \"DATASET\": DATASET,\r\n \"OPTIMIZER\": 'Adam',\r\n \"EARLY STOPPING\": 'val_loss',\r\n \"LOSS\": 'categorical_crossentropy',\r\n 'DROPOUT_RATE': DROPOUT_RATE,\r\n \"EMBEDDING_DIM\": EMBEDDING_DIM,\r\n \"EPOCHS\": EPOCHS,\r\n \"BATCH_SIZE\": BATCH_SIZE,\r\n \"SEQ_LENGTH\": SEQ_LENGTH,\r\n \"NUM_CLASSES\": NUM_CLASSES}\r\n\r\n print(\"save config in\" + str(CONFIG_PATH))\r\n with open(CONFIG_PATH, 'w') as file:\r\n json.dump(config, file)\r\n\r\n return\r\n\r\n\r\nTrain(dataset=2)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
# page = requests.get("https://www.worldometers.info/coronavirus/")
# soup = BeautifulSoup(page.content, 'html.parser')
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
# Initialize application
app = Flask(__name__)
@app.route("/")
def hello():
return "Flask setup"
def sheets_row_writer(data_list):
print("sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list)
print("Write complete")
def sheets_row_writer_donor(data_list_donor):
print("donor sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list_donor)
print("Write complete")
def death_global():
page = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
cases_list = []
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return "There are"+cases_list[0]+" Total cases out of which"+cases_list[1]+" have died and"+cases_list[2]+" have recovered . There are still "+active_cases+" active cases."
app.route("/death/global", methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get("https://www.worldometers.info/coronavirus/")
response = death_global()
reply = { "fulfillmentText": response }
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get("https://www.worldometers.info/coronavirus/country/"+id+"/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return "In " +idu+" There are"+cases_list[0]+"Total cases out of which"+cases_list[1]+"are dead and"+cases_list[2]+"have already recovered . There are still "+active_cases+ " active cases ."
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print (intent)
def news_nepal_int():
url = "https://nepalcorona.info/api/v1/news"
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{
"card":{
"title":data1['title'],
"subtitle":"Source: "+data1['source']+" >>",
"imageUri":data1['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data1['url']
},
{
"text":"Corona Symptoms",
"postback":"symptoms"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data2['title'],
"subtitle":"Source "+data2['source']+" >>",
"imageUri":data2['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data2['url']
},
{
"text":"Live Nepal Data",
"postback":"live-nepal-data"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data3['title'],
"subtitle":"Source "+data3['source']+" >>",
"imageUri":data3['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data3['url']
},
{
"text":"Self Isolation",
"postback":"self isolation"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response2 }
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0],name[0],phone[0],place[0]]
sheets_row_writer(ilist)
response2 = "Hello "+name[0]+" so you are looking for "+item_required[0]+" Your location is "+place[0]+" One of our Team will contact you @ " +phone[0]+" soon !"
response = [
{
"quickReplies": {
"title": response2,
"quickReplies": [
"Call a Doctor",
"Get Online Support"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']
url = "https://nepalcorona.info/api/v1/faqs"
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ["Live Nepali Data","Latest Nepali News","Symptoms","Preventions","Self Isolation","Play Corona Quiz"]
faqs = todos['data']
faq = faqs[rand]
if(ff=="English FAQ" or ff =="More Quizzles" or ff =="भाषा परिवर्तन"):
randq= faq['question']
randa = faq['answer']
opt1 = "More Quizzles"
opt2 = "Switch Language"
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = "अरु देखाउनुहोस >>"
opt2 = "भाषा परिवर्तन"
response2 = "Q. "+randq+"\n A. "+randa+"\n"
response = [{
"text": {
"text": [
randq
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"quickReplies": {
"title": randa,
"quickReplies": [
opt1,
opt2,
random.choice(opt3)
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def blood_pal_yes():
print (intent)
print (data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group,blood_amount,location,case,date,phone]
sheets_row_writer(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "The following request has been sent. We will contact you shortly. "+blood_group+" blood ("+str(blood_amount)+" ) required for "+case+" at "+location+" On "+date+" - "+phone+" Thank you ."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def blood_pal_donor_yes():
print (intent)
print (data)
permananet_address = data['queryResult']['parameters']['permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation= data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]
sheets_row_writer_donor(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "Thank you "+name+" for registration as a blood donor We will contact you at the time of urgency in your area."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def world_data_live():
text = death_global()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Data",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#district summary all
def district_all_summary():
text = dss.district_all_summary()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Summary",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#provience summary all should remove
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"District-Summary",
"Province-Data",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def proviencewise_detail():
#get provience name
#return dss.ard(provience)
#card
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [
{
"card":{
"title": "Covid-19 Provience: "+str(province)+" | Details",
"subtitle":response_summary,
"imageUri": "https://setopati.net/wp-content/uploads/2018/02/province6.jpg",
"buttons":[
{
"text":"Prov "+str(province)+" District Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Prov "+str(province)+" Vdc-Mun Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
# provincecode = pcode
if(dvdc=="vdc"):
print('inside vdc')
typ = "vdc"
else:
print('inside district')
typ = "district"
data_return = dss.ard(code,typ)
response = [
{
"quickReplies": {
"title": data_return,
"quickReplies": [
"District Summary",
"Province Summary",
"Nepali News",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def nepal_data_new_main_int():
url = "https://nepalcorona.info/api/v1/data/nepal"
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = "Nepal Cases \n Positive :"+str(todos["tested_positive"])+" | Recovered: "+str(todos["recovered"])+"| Deaths:"+str(todos["deaths"])+" "+"\n"
print(response2)
response_summary = dss.affected_summary()
response = [
{
"text": {
"text": [
response2
]
},
"platform": "FACEBOOK"
},
{
"text": {
"text": [
""
]
}
},
{
"card":{
"title": "Covid-19 Nepal | Stats",
"subtitle":response_summary,
# "subtitle": "Find details by Province, Municipals and Districts for Nepal",
"imageUri": "https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png",
"buttons":[
{
"text":"Province Summary",
"postback":"province data int"
},
{
"text":"District-Summary",
"postback":"district data int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def batti_update():
url = "https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM"
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos["feeds"][0]
response2 = "Batti Status Now :"+str(feeds["field1"]+"\n Last Updated: "+str(feeds["created_at"]))
print(response2)
reply = { "fulfillmentText": response2 }
return reply
def default():
return "Incorrect Data"
switcher = {
"nepal data int": nepal_data_new_main_int,
"news-nepal-int": news_nepal_int,
"i need help main int - yes": i_need_help_yes,
"faq-que-ans-int": faq_ques_ans,
"bloodpal-need-blood-main-int - yes": blood_pal_yes,
"data world int": world_data_live,
"district data int": district_all_summary,
"province data int": province_all_summary,
"province-wise-data": proviencewise_detail,
"dis-vdc data detail int": dis_vdc_detail,
"bloodpal-become-donor-main-int":blood_pal_donor_yes,
"batti-update-intent":batti_update
}
def switch(intentname):
    """Look up the handler registered for *intentname* and invoke it;
    unregistered intents are routed to default()."""
    handler = switcher.get(intentname, default)
    return handler()
# Route the incoming Dialogflow intent to its handler and return the JSON
# body expected by the webhook fulfillment contract.
reply = switch(intent)
return jsonify(reply)
# Start the Flask development server only when run directly (not under WSGI).
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "267cb37f2ccad5b02a809d9b85327eacd9a49515",
"index": 1061,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<mask token>\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': 
data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. 
' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . 
There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': 
response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n 
Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': 
[\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = 
data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = 
todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-3": "<mask token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source 
' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . 
There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source 
' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. 
' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. 
'\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n 
response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n 
print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n 
print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\n# page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n# soup = BeautifulSoup(page.content, 'html.parser')\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n\n# Initialize application\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n return \"Flask setup\"\n\ndef sheets_row_writer(data_list):\n print(\"sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list) \n print(\"Write complete\")\n\ndef sheets_row_writer_donor(data_list_donor):\n print(\"donor sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list_donor) \n print(\"Write complete\")\n\ndef death_global():\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n \n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n cases_list = []\n\n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n\n for res in result:\n cases_list.append(res.text)\n\n return \"There are\"+cases_list[0]+\" Total cases out of which\"+cases_list[1]+\" have died and\"+cases_list[2]+\" 
have recovered . There are still \"+active_cases+\" active cases.\"\n\napp.route(\"/death/global\", methods=['POST'])\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n response = death_global()\n reply = { \"fulfillmentText\": response } \n return jsonify(reply)\n \n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\"https://www.worldometers.info/coronavirus/country/\"+id+\"/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n \n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n\n return \"In \" +idu+\" There are\"+cases_list[0]+\"Total cases out of which\"+cases_list[1]+\"are dead and\"+cases_list[2]+\"have already recovered . There are still \"+active_cases+ \" active cases .\"\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print (intent)\n \n def news_nepal_int():\n url = \"https://nepalcorona.info/api/v1/news\"\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n \n response2 = [{\n \"card\":{\n \"title\":data1['title'],\n \"subtitle\":\"Source: \"+data1['source']+\" >>\",\n \"imageUri\":data1['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data1['url']\n },\n {\n \"text\":\"Corona Symptoms\",\n \"postback\":\"symptoms\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data2['title'],\n \"subtitle\":\"Source \"+data2['source']+\" >>\",\n \"imageUri\":data2['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data2['url']\n },\n {\n \"text\":\"Live Nepal Data\",\n 
\"postback\":\"live-nepal-data\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data3['title'],\n \"subtitle\":\"Source \"+data3['source']+\" >>\",\n \"imageUri\":data3['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data3['url']\n },\n {\n \"text\":\"Self Isolation\",\n \"postback\":\"self isolation\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n ]\n\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0],name[0],phone[0],place[0]]\n sheets_row_writer(ilist)\n response2 = \"Hello \"+name[0]+\" so you are looking for \"+item_required[0]+\" Your location is \"+place[0]+\" One of our Team will contact you @ \" +phone[0]+\" soon !\"\n response = [\n\n {\n \"quickReplies\": {\n \"title\": response2,\n \"quickReplies\": [\n \"Call a Doctor\",\n \"Get Online Support\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']\n url = \"https://nepalcorona.info/api/v1/faqs\"\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = [\"Live Nepali Data\",\"Latest Nepali News\",\"Symptoms\",\"Preventions\",\"Self Isolation\",\"Play Corona Quiz\"]\n faqs = todos['data']\n faq = faqs[rand]\n if(ff==\"English FAQ\" or ff ==\"More Quizzles\" or ff ==\"भाषा परिवर्तन\"):\n randq= faq['question']\n randa = faq['answer']\n opt1 = \"More Quizzles\"\n opt2 = \"Switch Language\"\n else:\n randq = faq['question_np']\n 
randa = faq['answer_np']\n opt1 = \"अरु देखाउनुहोस >>\"\n opt2 = \"भाषा परिवर्तन\"\n\n response2 = \"Q. \"+randq+\"\\n A. \"+randa+\"\\n\"\n response = [{\n \"text\": {\n \"text\": [\n randq\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n {\n \"quickReplies\": {\n \"title\": randa,\n \"quickReplies\": [\n opt1,\n opt2,\n random.choice(opt3)\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response }\n\n return reply\n \n def blood_pal_yes():\n print (intent)\n print (data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group,blood_amount,location,case,date,phone]\n sheets_row_writer(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"The following request has been sent. We will contact you shortly. 
\"+blood_group+\" blood (\"+str(blood_amount)+\" ) required for \"+case+\" at \"+location+\" On \"+date+\" - \"+phone+\" Thank you .\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def blood_pal_donor_yes():\n print (intent)\n print (data)\n permananet_address = data['queryResult']['parameters']['permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation= data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"Thank you \"+name+\" for registration as a blood donor We will contact you at the time of urgency in your area.\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n 
reply = { \"fulfillmentMessages\": response2 }\n return reply\n\n def world_data_live():\n text = death_global()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Data\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #district summary all\n def district_all_summary():\n text = dss.district_all_summary()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #provience summary all should remove \n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"District-Summary\",\n \"Province-Data\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def proviencewise_detail():\n #get provience name\n #return dss.ard(provience)\n #card \n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n\n response = [\n {\n \"card\":{\n \"title\": \"Covid-19 Provience: \"+str(province)+\" | Details\",\n \"subtitle\":response_summary,\n \"imageUri\": \"https://setopati.net/wp-content/uploads/2018/02/province6.jpg\",\n \"buttons\":[\n {\n \"text\":\"Prov \"+str(province)+\" District Data\",\n \"postback\":\"dis-vdc 
data detail int\"\n },\n {\n \"text\":\"Prov \"+str(province)+\" Vdc-Mun Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n \n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n\n\n # provincecode = pcode\n if(dvdc==\"vdc\"):\n print('inside vdc')\n typ = \"vdc\" \n else:\n print('inside district')\n typ = \"district\"\n\n data_return = dss.ard(code,typ)\n response = [\n {\n \"quickReplies\": {\n \"title\": data_return,\n \"quickReplies\": [\n \"District Summary\",\n \"Province Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def nepal_data_new_main_int():\n url = \"https://nepalcorona.info/api/v1/data/nepal\"\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n\n \n response2 = \"Nepal Cases \\n Positive :\"+str(todos[\"tested_positive\"])+\" | Recovered: \"+str(todos[\"recovered\"])+\"| Deaths:\"+str(todos[\"deaths\"])+\" \"+\"\\n\"\n print(response2)\n response_summary = dss.affected_summary()\n\n response = [\n {\n \"text\": {\n \"text\": [\n response2\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\": {\n \"text\": [\n \"\"\n ]\n }\n },\n {\n \"card\":{\n \"title\": \"Covid-19 Nepal | Stats\",\n \"subtitle\":response_summary,\n # \"subtitle\": \"Find details by Province, Municipals and Districts for Nepal\",\n \"imageUri\": 
\"https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png\",\n \"buttons\":[\n {\n \"text\":\"Province Summary\",\n \"postback\":\"province data int\"\n },\n {\n \"text\":\"District-Summary\",\n \"postback\":\"district data int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def batti_update():\n url = \"https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM\"\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos[\"feeds\"][0]\n \n response2 = \"Batti Status Now :\"+str(feeds[\"field1\"]+\"\\n Last Updated: \"+str(feeds[\"created_at\"]))\n print(response2)\n reply = { \"fulfillmentText\": response2 }\n return reply\n\n\n def default():\n return \"Incorrect Data\"\n\n switcher = {\n \"nepal data int\": nepal_data_new_main_int,\n \"news-nepal-int\": news_nepal_int,\n \"i need help main int - yes\": i_need_help_yes,\n \"faq-que-ans-int\": faq_ques_ans,\n \"bloodpal-need-blood-main-int - yes\": blood_pal_yes,\n \"data world int\": world_data_live,\n \"district data int\": district_all_summary,\n \"province data int\": province_all_summary,\n \"province-wise-data\": proviencewise_detail,\n \"dis-vdc data detail int\": dis_vdc_detail,\n \"bloodpal-become-donor-main-int\":blood_pal_donor_yes,\n \"batti-update-intent\":batti_update\n }\n \n def switch(intentname):\n return switcher.get(intentname, default)()\n\n reply = switch(intent)\n return jsonify(reply)\n \n\nif __name__ == '__main__':\n \n app.run()\n",
"step-ids": [
5,
6,
9,
10,
11
]
}
|
[
5,
6,
9,
10,
11
] |
import numpy as np
import mysql.connector
from mysql.connector import Error
import matplotlib.pyplot as plt
def readData():
    """Load the raw ultrasound signal for the first dataset row.

    Looks up the first record of the ``datasets`` table to obtain the
    uploaded file name (the ``pathdata`` column), reads that file line
    by line from the doctors' upload folder, and strips the trailing
    newline from each line.

    Returns
    -------
    (dataSignal, filename) : (list of str, str)
        The signal samples as strings and the file name they came from.
    """
    connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')
    cursor = connection.cursor()
    cursor.execute("SELECT id_pasien,nama,pathdata FROM datasets")
    first_row = cursor.fetchall()[0]
    filename = first_row[2]  # pathdata column

    dataSignal = []
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/" + filename, "r") as signal_file:
        for raw_line in signal_file.readlines():
            # Drop a single trailing newline; keep the last line as-is
            # when the file does not end with one.
            if raw_line.endswith("\n"):
                dataSignal.append(raw_line[:-1])
            else:
                dataSignal.append(raw_line)

    if connection.is_connected():
        cursor.close()
        connection.close()
    return dataSignal, filename
def saveData(data, label, filename):
    """Write the processed signal to disk and update the patient record.

    Parameters
    ----------
    data : iterable
        Processed signal samples; each element is written on its own line.
    label : sequence
        Classification result; ``label[0]`` is stored on the patient row.
    filename : str
        Original upload file name; output is saved as
        ``'hasilproses_' + filename``.
    """
    connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')
    cursor = connection.cursor()

    filename_hasil = 'hasilproses_' + filename
    # The with-block closes the file; the original also called f.close()
    # redundantly inside it.
    with open(r'C:\xampp\htdocs\projectCAD\public\storage\upload/files\hasilproses/' + filename_hasil, 'w') as f:
        for row in data:
            f.write(str(row) + '\n')

    # Select the patient id from the first dataset row.
    sql_select = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select)
    records = cursor.fetchall()
    id_pasien = records[0][0]
    print(label[0])

    # SECURITY FIX: the original built this UPDATE by string
    # concatenation (SQL-injection risk and quoting bugs); use a
    # parameterized query instead.
    cursor.execute(
        "UPDATE pasien SET hasilproses = %s,label = %s WHERE id = %s",
        (filename_hasil, str(label[0]), id_pasien))
    connection.commit()

    if connection.is_connected():
        cursor.close()
        connection.close()

    return print("sukses")
def getFiturEkstraksi():
    """Load the stored feature-extraction matrix from disk.

    Reads the first ``datasets`` row to locate the feature file, then
    parses it as a comma-separated matrix with ``numpy.genfromtxt``.

    Returns
    -------
    fitur : numpy.ndarray
        The loaded feature matrix.
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()
    sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    # BUG FIX: fetchall() returns a list of row tuples; concatenating
    # that list to the path string raised TypeError.  Use the pathdata
    # column of the first row, mirroring readData().
    # TODO(review): confirm the feature file name really lives in
    # datasets.pathdata rather than in the fitur_ekstraksis table.
    fiturname = records[0][2]
    fitur = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/" + fiturname, delimiter=',')

    if connection.is_connected():
        cursor.close()
        connection.close()

    return fitur
def saveFiturEkstraksi(fitur, label):
    """Persist the feature matrix and labels and register the file names.

    Writes ``fitur`` and ``label`` as text files in the upload/fitur
    folder, then records both file names on row 1 of the
    ``fitur_ekstraksis`` table.

    Parameters
    ----------
    fitur : array_like
        Feature matrix (one row per sample).
    label : array_like
        Label vector aligned with ``fitur``.
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()

    # BUG FIX: the original iterated ``range(len(fitur))`` and passed the
    # integer *index* to np.savetxt, which raises (scalars are not 1-D/2-D
    # arrays) -- no data was ever written.  Save the actual arrays.
    fiturname = 'fitur.txt'
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/" + fiturname, "w") as rowfitur:
        np.savetxt(rowfitur, np.atleast_1d(fitur))

    labelname = 'label.txt'
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/" + labelname, "w") as rowlabel:
        np.savetxt(rowlabel, np.atleast_1d(label))

    # Parameterized query; the values are constants today, but this keeps
    # the call safe if the file names ever become dynamic.
    cursor.execute(
        "UPDATE fitur_ekstraksis SET fitur = %s, label = %s WHERE id = 1",
        (fiturname, labelname))
    connection.commit()

    if connection.is_connected():
        cursor.close()
        connection.close()

    return print("sukses")
|
normal
|
{
"blob_id": "4d7696c832f9255fbc68040b61fde12e057c06fa",
"index": 3899,
"step-1": "<mask token>\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-2": "<mask token>\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n 
np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-3": "<mask token>\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-4": "import numpy as np\nimport mysql.connector\nfrom mysql.connector import Error\nimport matplotlib.pyplot as plt\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n 
cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-5": "import numpy as np\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nimport matplotlib.pyplot as plt\r\n\r\ndef readData():\r\n connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')\r\n\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor = connection.cursor()\r\n cursor.execute(sql_select_Query)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n # nama_pasien = data[1]\r\n filename = data[2]\r\n # dataSignal = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/files/\"+filename,delimiter=',')\r\n\r\n ## READ TXT FILE\r\n dataSignal = []\r\n my_file = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/\" + filename, \"r\")\r\n for line in my_file.readlines():\r\n if line[-1:] == \"\\n\":\r\n dataSignal.append(line[:-1])\r\n else:\r\n dataSignal.append(line)\r\n my_file.close()\r\n\r\n # C:/xampp/htdocs/projectCAD/public/storage/upload/files/hasilproses\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n return dataSignal, filename\r\n\r\ndef saveData(data,label,filename):\r\n connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')\r\n cursor = connection.cursor()\r\n\r\n filename_hasil = 'hasilproses_'+filename\r\n with open(r'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/' + filename_hasil, 'w') as f:\r\n for row in data:\r\n f.write(str(row) + '\\n')\r\n f.close()\r\n\r\n #Select Pasien from database\r\n sql_select = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n id_pasien = data[0]\r\n print(label[0])\r\n\r\n sql_update = \"UPDATE pasien SET hasilproses = '\" + filename_hasil + \"',label = '\"+str(label[0])+\"' WHERE id = \"+str(id_pasien)\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if 
(connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")\r\n\r\ndef getFiturEkstraksi():\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select_Query)\r\n fiturname = cursor.fetchall()\r\n fitur = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/\" + fiturname, delimiter=',')\r\n\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return fitur\r\n\r\ndef saveFiturEkstraksi(fitur,label):\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n # dbfitur = getFiturEkstraksi()\r\n # dbfitur.append(fitur)\r\n fiturname = 'fitur.txt'\r\n rowfitur = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+fiturname, \"w\")\r\n for row in range(len(fitur)):\r\n np.savetxt(rowfitur, row)\r\n rowfitur.close()\r\n\r\n labelname = 'label.txt'\r\n rowlabel = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+labelname, \"w\")\r\n for row in range(len(label)):\r\n np.savetxt(rowlabel,row)\r\n rowlabel.close()\r\n\r\n sql_update = \"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname + \"', label = '\" + labelname + \"' WHERE id = 1\"\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class GeneralizedQSamplingModel(OdfModel, Cache):

    def __init__(self, gtab, method='gqi2', sampling_length=1.2,
        normalize_peaks=False):
        """ Generalized Q-Sampling Imaging [1]_

        This model has the same assumptions as the DSI method i.e. Cartesian
        grid sampling in q-space and fast gradient switching.

        Implements equations 2.14 from [2]_ for standard GQI and equation 2.16
        from [2]_ for GQI2. You can think of GQI2 as an analytical solution of
        the DSI ODF.

        Parameters
        ----------
        gtab : object,
            GradientTable
        method : str,
            'standard' or 'gqi2'
        sampling_length : float,
            diffusion sampling length (lambda in eq. 2.14 and 2.16)
        normalize_peaks : bool,
            stored on the model for use by downstream peak-extraction
            code; not read inside this class

        References
        ----------
        .. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging", IEEE TMI, 2010

        .. [2] Garyfallidis E, "Towards an accurate brain tractography", PhD
           thesis, University of Cambridge, 2012.

        Notes
        -----
        As of version 0.9, range of the sampling length in GQI2 has changed
        to match the same scale used in the 'standard' method [1]_. This
        means that the value of `sampling_length` should be approximately
        1 - 1.3 (see [1]_, pg. 1628).

        Examples
        --------
        Here we create an example where we provide the data, a gradient table
        and a reconstruction sphere and calculate the ODF for the first
        voxel in the data.

        >>> from dipy.data import dsi_voxels
        >>> data, gtab = dsi_voxels()
        >>> from dipy.core.subdivide_octahedron import create_unit_sphere
        >>> sphere = create_unit_sphere(5)
        >>> from dipy.reconst.gqi import GeneralizedQSamplingModel
        >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)
        >>> voxel_signal = data[0, 0, 0]
        >>> odf = gq.fit(voxel_signal).odf(sphere)

        See Also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel
        """
        OdfModel.__init__(self, gtab)
        self.method = method
        self.Lambda = sampling_length
        self.normalize_peaks = normalize_peaks
        # Precompute the q-space sampling vectors used by every fit:
        # b_vector[i] = sqrt(bval_i * 0.01506) * bvec_i.
        # NOTE(review): 0.01506 presumably folds in the free-water
        # diffusion constant (6*D) from Yeh et al. -- confirm against [1].
        scaling = np.sqrt(self.gtab.bvals * 0.01506)
        tmp = np.tile(scaling, (3, 1))
        gradsT = self.gtab.bvecs.T
        b_vector = gradsT * tmp
        self.b_vector = b_vector.T

    @multi_voxel_fit
    def fit(self, data):
        # multi_voxel_fit presumably broadcasts this single-voxel fit
        # over a volume of data, one GeneralizedQSamplingFit per voxel.
        return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):

    def __init__(self, model, data):
        """ Calculates PDF and ODF for a single voxel

        Parameters
        ----------
        model : object,
            DiffusionSpectrumModel
        data : 1d ndarray,
            signal values
        """
        OdfFit.__init__(self, model, data)
        # Lazily-populated peak/anisotropy caches.
        self._gfa = None
        self.npeaks = 5
        self._peak_values = None
        self._peak_indices = None
        self._qa = None

    def odf(self, sphere):
        """ Calculates the discrete ODF for a given discrete sphere.
        """
        # The reconstruction matrix depends only on the model and the
        # sphere, so it is cached on the model keyed by the sphere.
        kernel = self.model.cache_get('gqi_vector', key=sphere)
        if kernel is None:
            projection = np.dot(self.model.b_vector, sphere.vertices.T)
            if self.model.method == 'gqi2':
                kernel = np.real(
                    squared_radial_component(projection * self.model.Lambda))
            if self.model.method == 'standard':
                kernel = np.real(
                    np.sinc(projection * self.model.Lambda / np.pi))
            self.model.cache_set('gqi_vector', sphere, kernel)
        self.gqi_vector = kernel
        return np.dot(self.data, self.gqi_vector)
<|reserved_special_token_0|>
def npa(self, odf, width=5):
    """ non-parametric anisotropy

    Nimmo-Smith et al. ISMRM 2011

    NOTE(review): this is written as a method (takes ``self`` and reads
    ``self.odf_vertices``) but sits at module level -- it can only be
    called with an object exposing ``odf_vertices``; verify intent.

    Returns the three maxima found by triple_odf_maxima plus a scalar
    anisotropy index built from the squared peak values.
    """
    t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
    # Squared peak magnitudes of the three (roughly orthogonal) maxima.
    psi0 = t0[1] ** 2
    psi1 = t1[1] ** 2
    psi2 = t2[1] ** 2
    # Fractional-anisotropy-style ratio over the three squared peaks.
    npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2
        ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))
    return t0, t1, t2, npa
<|reserved_special_token_0|>
def polar_zone_vertices(vertices, pole, width=5):
    """
    finds the 'vertices' within the polar cap of radius 'width' degrees
    around the 'pole' (on either side of the sphere, since the dot
    product is taken in absolute value)

    NOTE(review): the original docstring said "equatorial band", but the
    predicate |v . pole| > cos(width) selects vertices NEAR the pole,
    not near the equator.
    """
    return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >
        np.abs(np.cos(np.pi * width / 180))]
def upper_hemi_map(v):
    """Reflect a 3-vector so it lies in the upper (z >= 0) hemisphere.

    A vector with v[2] == 0 maps to the zero vector, because
    np.sign(0) == 0.
    """
    z_sign = np.sign(v[2])
    return z_sign * v
def equatorial_maximum(vertices, odf, pole, width):
    """Locate the strongest ODF sample in the equatorial band of `pole`.

    Returns (index, value) of the maximum, or (None, None) when the band
    of half-width `width` degrees contains no vertices.
    """
    band = equatorial_zone_vertices(vertices, pole, width)
    if not band:
        print('empty equatorial band at %s pole with width %f' % (np.
            array_str(pole), width))
        return None, None
    best = max(band, key=lambda idx: odf[idx])
    return best, odf[best]
def patch_vertices(vertices, pole, width):
    """
    find 'vertices' within the cone of 'width' degrees around 'pole'
    (either side of the sphere, since |dot| is compared)
    """
    threshold = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > threshold]
def patch_maximum(vertices, odf, pole, width):
    """Return (index, value) of the largest ODF sample near `pole`.

    Searches the vertices within `width` degrees of `pole` (via
    patch_vertices).  Returns (None, None) when the cone is empty.

    BUG FIX: the original returned the nonexistent ``np.Null`` on the
    empty-cone path, which raised AttributeError; this now returns
    (None, None), consistent with equatorial_maximum.  The doubled
    "with with" in the diagnostic message is also fixed.
    """
    cone = patch_vertices(vertices, pole, width)
    if len(cone) == 0:
        print('empty cone around pole %s with width %f' % (np.
            array_str(pole), width))
        return None, None
    vals = [odf[i] for i in cone]
    argmax = np.argmax(vals)
    return cone[argmax], vals[argmax]
def odf_sum(odf):
    """Total mass of the sampled ODF (sum over all sphere samples)."""
    return np.asarray(odf).sum()
<|reserved_special_token_0|>
def triple_odf_maxima(vertices, odf, width):
    """Find three (roughly orthogonal) ODF maxima.

    The first is the global maximum over the sampled vertices; the
    second is the maximum on the equatorial band around it; the third is
    the band vertex most orthogonal to the second.  Returns a list of
    (index, value) pairs.
    """
    sampled = [odf[i] for i, _ in enumerate(vertices)]
    first = np.argmax(sampled)
    first_val = odf[first]
    pole = vertices[first]
    band = equatorial_zone_vertices(vertices, pole, width)
    second, second_val = equatorial_maximum(vertices, odf, pole, width)
    third = min(band,
                key=lambda p: np.abs(np.dot(vertices[second], vertices[p])))
    third_val = odf[third]
    # An alternative for the third peak (kept from the original, unused):
    # cross12 = np.cross(vertices[first], vertices[second])
    # cross12 = cross12 / np.sqrt(np.sum(cross12 ** 2))
    # third, third_val = patch_maximum(vertices, odf, cross12, 2 * width)
    return [(first, first_val), (second, second_val), (third, third_val)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneralizedQSamplingModel(OdfModel, Cache):
def __init__(self, gtab, method='gqi2', sampling_length=1.2,
normalize_peaks=False):
""" Generalized Q-Sampling Imaging [1]_
This model has the same assumptions as the DSI method i.e. Cartesian
grid sampling in q-space and fast gradient switching.
Implements equations 2.14 from [2]_ for standard GQI and equation 2.16
from [2]_ for GQI2. You can think of GQI2 as an analytical solution of
the DSI ODF.
Parameters
----------
gtab : object,
GradientTable
method : str,
'standard' or 'gqi2'
sampling_length : float,
diffusion sampling length (lambda in eq. 2.14 and 2.16)
References
----------
.. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging", IEEE TMI, 2010
.. [2] Garyfallidis E, "Towards an accurate brain tractography", PhD
thesis, University of Cambridge, 2012.
Notes
-----
As of version 0.9, range of the sampling length in GQI2 has changed
to match the same scale used in the 'standard' method [1]_. This
means that the value of `sampling_length` should be approximately
1 - 1.3 (see [1]_, pg. 1628).
Examples
--------
Here we create an example where we provide the data, a gradient table
and a reconstruction sphere and calculate the ODF for the first
voxel in the data.
>>> from dipy.data import dsi_voxels
>>> data, gtab = dsi_voxels()
>>> from dipy.core.subdivide_octahedron import create_unit_sphere
>>> sphere = create_unit_sphere(5)
>>> from dipy.reconst.gqi import GeneralizedQSamplingModel
>>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)
>>> voxel_signal = data[0, 0, 0]
>>> odf = gq.fit(voxel_signal).odf(sphere)
See Also
--------
dipy.reconst.dsi.DiffusionSpectrumModel
"""
OdfModel.__init__(self, gtab)
self.method = method
self.Lambda = sampling_length
self.normalize_peaks = normalize_peaks
scaling = np.sqrt(self.gtab.bvals * 0.01506)
tmp = np.tile(scaling, (3, 1))
gradsT = self.gtab.bvecs.T
b_vector = gradsT * tmp
self.b_vector = b_vector.T
@multi_voxel_fit
def fit(self, data):
return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):
def __init__(self, model, data):
""" Calculates PDF and ODF for a single voxel
Parameters
----------
model : object,
DiffusionSpectrumModel
data : 1d ndarray,
signal values
"""
OdfFit.__init__(self, model, data)
self._gfa = None
self.npeaks = 5
self._peak_values = None
self._peak_indices = None
self._qa = None
def odf(self, sphere):
""" Calculates the discrete ODF for a given discrete sphere.
"""
self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)
if self.gqi_vector is None:
if self.model.method == 'gqi2':
H = squared_radial_component
self.gqi_vector = np.real(H(np.dot(self.model.b_vector,
sphere.vertices.T) * self.model.Lambda))
if self.model.method == 'standard':
self.gqi_vector = np.real(np.sinc(np.dot(self.model.
b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))
self.model.cache_set('gqi_vector', sphere, self.gqi_vector)
return np.dot(self.data, self.gqi_vector)
def normalize_qa(qa, max_qa=None):
    """ Normalize quantitative anisotropy.

    Used mostly with GQI rather than GQI2.

    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        where N is the maximum number of peaks stored
    max_qa : float,
        maximum qa value. Usually found in the CSF (corticospinal fluid).

    Returns
    -------
    nqa : array, shape (x, Y, Z, N)
        normalized quantitative anisotropy

    Notes
    -----
    Normalized quantitative anisotropy has the very useful property
    to be very small near gray matter and background areas. Therefore,
    it can be used to mask out white matter areas.
    """
    # Divide by the supplied ceiling, or by the array's own maximum
    # when none is given.
    denominator = qa.max() if max_qa is None else max_qa
    return qa / denominator
<|reserved_special_token_0|>
def npa(self, odf, width=5):
""" non-parametric anisotropy
Nimmo-Smith et al. ISMRM 2011
"""
t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
psi0 = t0[1] ** 2
psi1 = t1[1] ** 2
psi2 = t2[1] ** 2
npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2
) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))
return t0, t1, t2, npa
<|reserved_special_token_0|>
def polar_zone_vertices(vertices, pole, width=5):
"""
finds the 'vertices' in the equatorial band around
the 'pole' of radius 'width' degrees
"""
return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >
np.abs(np.cos(np.pi * width / 180))]
def upper_hemi_map(v):
"""
maps a 3-vector into the z-upper hemisphere
"""
return np.sign(v[2]) * v
def equatorial_maximum(vertices, odf, pole, width):
eqvert = equatorial_zone_vertices(vertices, pole, width)
if len(eqvert) == 0:
print('empty equatorial band at %s pole with width %f' % (np.
array_str(pole), width))
return None, None
eqvals = [odf[i] for i in eqvert]
eqargmax = np.argmax(eqvals)
eqvertmax = eqvert[eqargmax]
eqvalmax = eqvals[eqargmax]
return eqvertmax, eqvalmax
def patch_vertices(vertices, pole, width):
"""
find 'vertices' within the cone of 'width' degrees around 'pole'
"""
return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >
np.abs(np.cos(np.pi * width / 180))]
def patch_maximum(vertices, odf, pole, width):
eqvert = patch_vertices(vertices, pole, width)
if len(eqvert) == 0:
print('empty cone around pole %s with with width %f' % (np.
array_str(pole), width))
return np.Null, np.Null
eqvals = [odf[i] for i in eqvert]
eqargmax = np.argmax(eqvals)
eqvertmax = eqvert[eqargmax]
eqvalmax = eqvals[eqargmax]
return eqvertmax, eqvalmax
def odf_sum(odf):
return np.sum(odf)
def patch_sum(vertices, odf, pole, width):
    """Sum the ODF samples within `width` degrees of `pole`.

    Returns None when the cone contains no vertices.

    BUG FIX: the original returned the nonexistent ``np.Null`` on the
    empty-cone path, which raised AttributeError; this now returns
    None.  The doubled "with with" in the message is also fixed.
    """
    cone = patch_vertices(vertices, pole, width)
    if len(cone) == 0:
        print('empty cone around pole %s with width %f' % (np.
            array_str(pole), width))
        return None
    return np.sum([odf[i] for i in cone])
def triple_odf_maxima(vertices, odf, width):
indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])
odfmax1 = odf[indmax1]
pole = vertices[indmax1]
eqvert = equatorial_zone_vertices(vertices, pole, width)
indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)
indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p
])) for p in eqvert])]
odfmax3 = odf[indmax3]
"""
cross12 = np.cross(vertices[indmax1],vertices[indmax2])
cross12 = cross12/np.sqrt(np.sum(cross12**2))
indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)
"""
return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneralizedQSamplingModel(OdfModel, Cache):
def __init__(self, gtab, method='gqi2', sampling_length=1.2,
normalize_peaks=False):
""" Generalized Q-Sampling Imaging [1]_
This model has the same assumptions as the DSI method i.e. Cartesian
grid sampling in q-space and fast gradient switching.
Implements equations 2.14 from [2]_ for standard GQI and equation 2.16
from [2]_ for GQI2. You can think of GQI2 as an analytical solution of
the DSI ODF.
Parameters
----------
gtab : object,
GradientTable
method : str,
'standard' or 'gqi2'
sampling_length : float,
diffusion sampling length (lambda in eq. 2.14 and 2.16)
References
----------
.. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging", IEEE TMI, 2010
.. [2] Garyfallidis E, "Towards an accurate brain tractography", PhD
thesis, University of Cambridge, 2012.
Notes
-----
As of version 0.9, range of the sampling length in GQI2 has changed
to match the same scale used in the 'standard' method [1]_. This
means that the value of `sampling_length` should be approximately
1 - 1.3 (see [1]_, pg. 1628).
Examples
--------
Here we create an example where we provide the data, a gradient table
and a reconstruction sphere and calculate the ODF for the first
voxel in the data.
>>> from dipy.data import dsi_voxels
>>> data, gtab = dsi_voxels()
>>> from dipy.core.subdivide_octahedron import create_unit_sphere
>>> sphere = create_unit_sphere(5)
>>> from dipy.reconst.gqi import GeneralizedQSamplingModel
>>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)
>>> voxel_signal = data[0, 0, 0]
>>> odf = gq.fit(voxel_signal).odf(sphere)
See Also
--------
dipy.reconst.dsi.DiffusionSpectrumModel
"""
OdfModel.__init__(self, gtab)
self.method = method
self.Lambda = sampling_length
self.normalize_peaks = normalize_peaks
scaling = np.sqrt(self.gtab.bvals * 0.01506)
tmp = np.tile(scaling, (3, 1))
gradsT = self.gtab.bvecs.T
b_vector = gradsT * tmp
self.b_vector = b_vector.T
@multi_voxel_fit
def fit(self, data):
return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):
def __init__(self, model, data):
""" Calculates PDF and ODF for a single voxel
Parameters
----------
model : object,
DiffusionSpectrumModel
data : 1d ndarray,
signal values
"""
OdfFit.__init__(self, model, data)
self._gfa = None
self.npeaks = 5
self._peak_values = None
self._peak_indices = None
self._qa = None
def odf(self, sphere):
""" Calculates the discrete ODF for a given discrete sphere.
"""
self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)
if self.gqi_vector is None:
if self.model.method == 'gqi2':
H = squared_radial_component
self.gqi_vector = np.real(H(np.dot(self.model.b_vector,
sphere.vertices.T) * self.model.Lambda))
if self.model.method == 'standard':
self.gqi_vector = np.real(np.sinc(np.dot(self.model.
b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))
self.model.cache_set('gqi_vector', sphere, self.gqi_vector)
return np.dot(self.data, self.gqi_vector)
def normalize_qa(qa, max_qa=None):
""" Normalize quantitative anisotropy.
Used mostly with GQI rather than GQI2.
Parameters
----------
qa : array, shape (X, Y, Z, N)
where N is the maximum number of peaks stored
max_qa : float,
maximum qa value. Usually found in the CSF (corticospinal fluid).
Returns
-------
nqa : array, shape (x, Y, Z, N)
normalized quantitative anisotropy
Notes
-----
Normalized quantitative anisotropy has the very useful property
to be very small near gray matter and background areas. Therefore,
it can be used to mask out white matter areas.
"""
if max_qa is None:
return qa / qa.max()
return qa / max_qa
def squared_radial_component(x, tol=0.01):
    """Evaluate the GQI2 radial integral kernel (Eq. 8, Yeh et al. 2010).

    Parameters
    ----------
    x : ndarray
        Kernel argument.
    tol : float, optional
        Half-width of the interval around zero where the analytic limit
        1/3 replaces the indeterminate (0/0) expression.
    """
    with warnings.catch_warnings():
        # The ratio is 0/0 at x == 0; the limit is patched in below.
        warnings.simplefilter('ignore')
        numer = 2 * x * np.cos(x) + (x * x - 2) * np.sin(x)
        ratio = numer / x ** 3
    near_zero = np.abs(x) < tol
    return np.where(near_zero, 1.0 / 3, ratio)
def npa(self, odf, width=5):
    """Non-parametric anisotropy (Nimmo-Smith et al., ISMRM 2011).

    Returns the three ODF maxima found by `triple_odf_maxima` together
    with the anisotropy computed from their squared values.
    """
    t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
    p0, p1, p2 = t0[1] ** 2, t1[1] ** 2, t2[1] ** 2
    spread = np.sqrt((p0 - p1) ** 2 + (p1 - p2) ** 2 + (p2 - p0) ** 2)
    norm = np.sqrt(2 * (p0 ** 2 + p1 ** 2 + p2 ** 2))
    return t0, t1, t2, spread / norm
<|reserved_special_token_0|>
def polar_zone_vertices(vertices, pole, width=5):
    """Indices of `vertices` in the polar cap of radius `width` degrees
    around `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def upper_hemi_map(v):
    """Map a 3-vector into the z-upper hemisphere by flipping its sign
    when the z component is negative.
    """
    orientation = np.sign(v[2])
    return orientation * v
def equatorial_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum on the equatorial band around `pole`.

    Returns ``(index, value)``, or ``(None, None)`` when the band holds
    no vertices.
    """
    band = equatorial_zone_vertices(vertices, pole, width)
    if not band:
        print('empty equatorial band at %s pole with width %f' %
              (np.array_str(pole), width))
        return None, None
    band_vals = [odf[idx] for idx in band]
    best = np.argmax(band_vals)
    return band[best], band_vals[best]
def patch_vertices(vertices, pole, width):
    """Indices of `vertices` inside the cone of `width` degrees around
    `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def patch_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum inside the cone of `width` degrees around
    `pole`.

    Returns
    -------
    ``(index, value)`` of the maximum, or ``(None, None)`` when the cone
    contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; use (None, None) like
        # equatorial_maximum does.
        return None, None
    eqvals = [odf[i] for i in eqvert]
    eqargmax = np.argmax(eqvals)
    return eqvert[eqargmax], eqvals[eqargmax]
def odf_sum(odf):
    """Return the total mass (sum of all samples) of `odf`."""
    return np.asarray(odf).sum()
def patch_sum(vertices, odf, pole, width):
    """Sum the ODF over vertices inside the cone of `width` degrees
    around `pole`.

    Returns
    -------
    The sum, or ``None`` when the cone contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; return None instead.
        return None
    return np.sum([odf[i] for i in eqvert])
def triple_odf_maxima(vertices, odf, width):
    """Return three ``(index, value)`` ODF maxima.

    The first is the global maximum, the second is the maximum on the
    equatorial band around it, and the third is the band vertex most
    nearly orthogonal to the second.
    """
    first = np.argmax([odf[i] for i in range(len(vertices))])
    pole = vertices[first]
    band = equatorial_zone_vertices(vertices, pole, width)
    second, second_val = equatorial_maximum(vertices, odf, pole, width)
    alignment = [np.abs(np.dot(vertices[second], vertices[p])) for p in band]
    third = band[np.argmin(alignment)]
    return [(first, odf[first]), (second, second_val), (third, odf[third])]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
import warnings
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class GeneralizedQSamplingModel(OdfModel, Cache):

    def __init__(self, gtab, method='gqi2', sampling_length=1.2,
                 normalize_peaks=False):
        """ Generalized Q-Sampling Imaging [1]_

        Shares the DSI assumptions: Cartesian grid sampling in q-space
        and fast gradient switching. Implements eq. 2.14 of [2]_ for the
        'standard' method and eq. 2.16 of [2]_ for 'gqi2' (which can be
        seen as an analytical solution of the DSI ODF).

        Parameters
        ----------
        gtab : object,
            GradientTable
        method : str,
            'standard' or 'gqi2'
        sampling_length : float,
            diffusion sampling length (lambda in eq. 2.14 and 2.16);
            since version 0.9 both methods use the same scale, so values
            around 1 - 1.3 are appropriate (see [1]_, pg. 1628).

        References
        ----------
        .. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging",
               IEEE TMI, 2010
        .. [2] Garyfallidis E, "Towards an accurate brain tractography",
               PhD thesis, University of Cambridge, 2012.

        See Also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel
        """
        OdfModel.__init__(self, gtab)
        self.method = method
        self.Lambda = sampling_length
        self.normalize_peaks = normalize_peaks
        # 0.01506 = 6 * D, where D is the free-water diffusion
        # coefficient; tau is already folded into the b-value.
        scale = np.sqrt(self.gtab.bvals * 0.01506)
        self.b_vector = (self.gtab.bvecs.T * np.tile(scale, (3, 1))).T

    @multi_voxel_fit
    def fit(self, data):
        """Fit one voxel; `multi_voxel_fit` maps this over a volume."""
        return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):

    def __init__(self, model, data):
        """Hold the single-voxel state needed to compute PDF and ODF.

        Parameters
        ----------
        model : object,
            DiffusionSpectrumModel
        data : 1d ndarray,
            signal values
        """
        OdfFit.__init__(self, model, data)
        self._gfa = None
        self.npeaks = 5
        self._peak_values = None
        self._peak_indices = None
        self._qa = None

    def odf(self, sphere):
        """Calculate the discrete ODF for a given discrete sphere."""
        # The sampling kernel depends only on the model and the sphere,
        # so it is cached on the model and reused across voxels.
        kernel = self.model.cache_get('gqi_vector', key=sphere)
        if kernel is None:
            proj = np.dot(self.model.b_vector, sphere.vertices.T)
            if self.model.method == 'gqi2':
                kernel = np.real(
                    squared_radial_component(proj * self.model.Lambda))
            if self.model.method == 'standard':
                kernel = np.real(np.sinc(proj * self.model.Lambda / np.pi))
            self.model.cache_set('gqi_vector', sphere, kernel)
        self.gqi_vector = kernel
        return np.dot(self.data, self.gqi_vector)
def normalize_qa(qa, max_qa=None):
    """Normalize quantitative anisotropy (QA).

    Used mostly with GQI rather than GQI2.

    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        QA values; N is the maximum number of peaks stored.
    max_qa : float, optional
        Maximum QA value, usually found in the CSF. When omitted, the
        global maximum of `qa` is used instead.

    Returns
    -------
    nqa : array, shape (X, Y, Z, N)
        Normalized quantitative anisotropy.

    Notes
    -----
    Normalized QA is very small near gray matter and background areas,
    so it can be used to mask out non-white-matter regions.
    """
    scale = qa.max() if max_qa is None else max_qa
    return qa / scale
def squared_radial_component(x, tol=0.01):
    """Evaluate the GQI2 radial integral kernel (Eq. 8, Yeh et al. 2010).

    Parameters
    ----------
    x : ndarray
        Kernel argument.
    tol : float, optional
        Half-width of the interval around zero where the analytic limit
        1/3 replaces the indeterminate (0/0) expression.
    """
    with warnings.catch_warnings():
        # The ratio is 0/0 at x == 0; the limit is patched in below.
        warnings.simplefilter('ignore')
        numer = 2 * x * np.cos(x) + (x * x - 2) * np.sin(x)
        ratio = numer / x ** 3
    near_zero = np.abs(x) < tol
    return np.where(near_zero, 1.0 / 3, ratio)
def npa(self, odf, width=5):
    """Non-parametric anisotropy (Nimmo-Smith et al., ISMRM 2011).

    Returns the three ODF maxima found by `triple_odf_maxima` together
    with the anisotropy computed from their squared values.
    """
    t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
    p0, p1, p2 = t0[1] ** 2, t1[1] ** 2, t2[1] ** 2
    spread = np.sqrt((p0 - p1) ** 2 + (p1 - p2) ** 2 + (p2 - p0) ** 2)
    norm = np.sqrt(2 * (p0 ** 2 + p1 ** 2 + p2 ** 2))
    return t0, t1, t2, spread / norm
def equatorial_zone_vertices(vertices, pole, width=5):
    """Indices of `vertices` in the equatorial zone conjugate to `pole`,
    within half-width `width` degrees.
    """
    cutoff = np.abs(np.sin(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) < cutoff]
def polar_zone_vertices(vertices, pole, width=5):
    """Indices of `vertices` in the polar cap of radius `width` degrees
    around `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def upper_hemi_map(v):
    """Map a 3-vector into the z-upper hemisphere by flipping its sign
    when the z component is negative.
    """
    orientation = np.sign(v[2])
    return orientation * v
def equatorial_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum on the equatorial band around `pole`.

    Returns ``(index, value)``, or ``(None, None)`` when the band holds
    no vertices.
    """
    band = equatorial_zone_vertices(vertices, pole, width)
    if not band:
        print('empty equatorial band at %s pole with width %f' %
              (np.array_str(pole), width))
        return None, None
    band_vals = [odf[idx] for idx in band]
    best = np.argmax(band_vals)
    return band[best], band_vals[best]
def patch_vertices(vertices, pole, width):
    """Indices of `vertices` inside the cone of `width` degrees around
    `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def patch_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum inside the cone of `width` degrees around
    `pole`.

    Returns
    -------
    ``(index, value)`` of the maximum, or ``(None, None)`` when the cone
    contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; use (None, None) like
        # equatorial_maximum does.
        return None, None
    eqvals = [odf[i] for i in eqvert]
    eqargmax = np.argmax(eqvals)
    return eqvert[eqargmax], eqvals[eqargmax]
def odf_sum(odf):
    """Return the total mass (sum of all samples) of `odf`."""
    return np.asarray(odf).sum()
def patch_sum(vertices, odf, pole, width):
    """Sum the ODF over vertices inside the cone of `width` degrees
    around `pole`.

    Returns
    -------
    The sum, or ``None`` when the cone contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; return None instead.
        return None
    return np.sum([odf[i] for i in eqvert])
def triple_odf_maxima(vertices, odf, width):
    """Return three ``(index, value)`` ODF maxima.

    The first is the global maximum, the second is the maximum on the
    equatorial band around it, and the third is the band vertex most
    nearly orthogonal to the second.
    """
    first = np.argmax([odf[i] for i in range(len(vertices))])
    pole = vertices[first]
    band = equatorial_zone_vertices(vertices, pole, width)
    second, second_val = equatorial_maximum(vertices, odf, pole, width)
    alignment = [np.abs(np.dot(vertices[second], vertices[p])) for p in band]
    third = band[np.argmin(alignment)]
    return [(first, odf[first]), (second, second_val), (third, odf[third])]
<|reserved_special_token_1|>
""" Classes and functions for generalized q-sampling """
import numpy as np
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
import warnings
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class GeneralizedQSamplingModel(OdfModel, Cache):

    def __init__(self, gtab, method='gqi2', sampling_length=1.2,
                 normalize_peaks=False):
        """ Generalized Q-Sampling Imaging [1]_

        Shares the DSI assumptions: Cartesian grid sampling in q-space
        and fast gradient switching. Implements eq. 2.14 of [2]_ for the
        'standard' method and eq. 2.16 of [2]_ for 'gqi2' (which can be
        seen as an analytical solution of the DSI ODF).

        Parameters
        ----------
        gtab : object,
            GradientTable
        method : str,
            'standard' or 'gqi2'
        sampling_length : float,
            diffusion sampling length (lambda in eq. 2.14 and 2.16);
            since version 0.9 both methods use the same scale, so values
            around 1 - 1.3 are appropriate (see [1]_, pg. 1628).

        References
        ----------
        .. [1] Yeh F-C et al., "Generalized Q-Sampling Imaging",
               IEEE TMI, 2010
        .. [2] Garyfallidis E, "Towards an accurate brain tractography",
               PhD thesis, University of Cambridge, 2012.

        See Also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel
        """
        OdfModel.__init__(self, gtab)
        self.method = method
        self.Lambda = sampling_length
        self.normalize_peaks = normalize_peaks
        # 0.01506 = 6 * D, where D is the free-water diffusion
        # coefficient; tau is already folded into the b-value.
        scale = np.sqrt(self.gtab.bvals * 0.01506)
        self.b_vector = (self.gtab.bvecs.T * np.tile(scale, (3, 1))).T

    @multi_voxel_fit
    def fit(self, data):
        """Fit one voxel; `multi_voxel_fit` maps this over a volume."""
        return GeneralizedQSamplingFit(self, data)
class GeneralizedQSamplingFit(OdfFit):

    def __init__(self, model, data):
        """Hold the single-voxel state needed to compute PDF and ODF.

        Parameters
        ----------
        model : object,
            DiffusionSpectrumModel
        data : 1d ndarray,
            signal values
        """
        OdfFit.__init__(self, model, data)
        self._gfa = None
        self.npeaks = 5
        self._peak_values = None
        self._peak_indices = None
        self._qa = None

    def odf(self, sphere):
        """Calculate the discrete ODF for a given discrete sphere."""
        # The sampling kernel depends only on the model and the sphere,
        # so it is cached on the model and reused across voxels.
        kernel = self.model.cache_get('gqi_vector', key=sphere)
        if kernel is None:
            proj = np.dot(self.model.b_vector, sphere.vertices.T)
            if self.model.method == 'gqi2':
                kernel = np.real(
                    squared_radial_component(proj * self.model.Lambda))
            if self.model.method == 'standard':
                kernel = np.real(np.sinc(proj * self.model.Lambda / np.pi))
            self.model.cache_set('gqi_vector', sphere, kernel)
        self.gqi_vector = kernel
        return np.dot(self.data, self.gqi_vector)
def normalize_qa(qa, max_qa=None):
    """Normalize quantitative anisotropy (QA).

    Used mostly with GQI rather than GQI2.

    Parameters
    ----------
    qa : array, shape (X, Y, Z, N)
        QA values; N is the maximum number of peaks stored.
    max_qa : float, optional
        Maximum QA value, usually found in the CSF. When omitted, the
        global maximum of `qa` is used instead.

    Returns
    -------
    nqa : array, shape (X, Y, Z, N)
        Normalized quantitative anisotropy.

    Notes
    -----
    Normalized QA is very small near gray matter and background areas,
    so it can be used to mask out non-white-matter regions.
    """
    scale = qa.max() if max_qa is None else max_qa
    return qa / scale
def squared_radial_component(x, tol=0.01):
    """Evaluate the GQI2 radial integral kernel (Eq. 8, Yeh et al. 2010).

    Parameters
    ----------
    x : ndarray
        Kernel argument.
    tol : float, optional
        Half-width of the interval around zero where the analytic limit
        1/3 replaces the indeterminate (0/0) expression.
    """
    with warnings.catch_warnings():
        # The ratio is 0/0 at x == 0; the limit is patched in below.
        warnings.simplefilter('ignore')
        numer = 2 * x * np.cos(x) + (x * x - 2) * np.sin(x)
        ratio = numer / x ** 3
    near_zero = np.abs(x) < tol
    return np.where(near_zero, 1.0 / 3, ratio)
def npa(self, odf, width=5):
    """Non-parametric anisotropy (Nimmo-Smith et al., ISMRM 2011).

    Returns the three ODF maxima found by `triple_odf_maxima` together
    with the anisotropy computed from their squared values.
    """
    t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)
    p0, p1, p2 = t0[1] ** 2, t1[1] ** 2, t2[1] ** 2
    spread = np.sqrt((p0 - p1) ** 2 + (p1 - p2) ** 2 + (p2 - p0) ** 2)
    norm = np.sqrt(2 * (p0 ** 2 + p1 ** 2 + p2 ** 2))
    return t0, t1, t2, spread / norm
def equatorial_zone_vertices(vertices, pole, width=5):
    """Indices of `vertices` in the equatorial zone conjugate to `pole`,
    within half-width `width` degrees.
    """
    cutoff = np.abs(np.sin(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) < cutoff]
def polar_zone_vertices(vertices, pole, width=5):
    """Indices of `vertices` in the polar cap of radius `width` degrees
    around `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def upper_hemi_map(v):
    """Map a 3-vector into the z-upper hemisphere by flipping its sign
    when the z component is negative.
    """
    orientation = np.sign(v[2])
    return orientation * v
def equatorial_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum on the equatorial band around `pole`.

    Returns ``(index, value)``, or ``(None, None)`` when the band holds
    no vertices.
    """
    band = equatorial_zone_vertices(vertices, pole, width)
    if not band:
        print('empty equatorial band at %s pole with width %f' %
              (np.array_str(pole), width))
        return None, None
    band_vals = [odf[idx] for idx in band]
    best = np.argmax(band_vals)
    return band[best], band_vals[best]
def patch_vertices(vertices, pole, width):
    """Indices of `vertices` inside the cone of `width` degrees around
    `pole`.
    """
    cutoff = np.abs(np.cos(np.pi * width / 180))
    return [idx for idx, vert in enumerate(vertices)
            if np.abs(np.dot(vert, pole)) > cutoff]
def patch_maximum(vertices, odf, pole, width):
    """Locate the ODF maximum inside the cone of `width` degrees around
    `pole`.

    Returns
    -------
    ``(index, value)`` of the maximum, or ``(None, None)`` when the cone
    contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    # need to test for whether eqvert is empty or not
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; use (None, None) like
        # equatorial_maximum does.
        return None, None
    eqvals = [odf[i] for i in eqvert]
    eqargmax = np.argmax(eqvals)
    return eqvert[eqargmax], eqvals[eqargmax]
def odf_sum(odf):
    """Return the total mass (sum of all samples) of `odf`."""
    return np.asarray(odf).sum()
def patch_sum(vertices, odf, pole, width):
    """Sum the ODF over vertices inside the cone of `width` degrees
    around `pole`.

    Returns
    -------
    The sum, or ``None`` when the cone contains no vertices.
    """
    eqvert = patch_vertices(vertices, pole, width)
    # need to test for whether eqvert is empty or not
    if len(eqvert) == 0:
        print('empty cone around pole %s with width %f' %
              (np.array_str(pole), width))
        # BUG FIX: the original returned np.Null, which does not exist in
        # NumPy and raised AttributeError; return None instead.
        return None
    return np.sum([odf[i] for i in eqvert])
def triple_odf_maxima(vertices, odf, width):
    """Return three ``(index, value)`` ODF maxima.

    The first is the global maximum, the second is the maximum on the
    equatorial band around it, and the third is the band vertex most
    nearly orthogonal to the second.
    """
    first = np.argmax([odf[i] for i in range(len(vertices))])
    pole = vertices[first]
    band = equatorial_zone_vertices(vertices, pole, width)
    second, second_val = equatorial_maximum(vertices, odf, pole, width)
    alignment = [np.abs(np.dot(vertices[second], vertices[p])) for p in band]
    third = band[np.argmin(alignment)]
    return [(first, odf[first]), (second, second_val), (third, odf[third])]
|
flexible
|
{
"blob_id": "2f193cb1eaf7b5e99d20025716a248144af90b92",
"index": 1925,
"step-1": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\n<mask token>\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\n<mask 
token>\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-2": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\n<mask token>\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef 
patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-3": "<mask token>\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / x ** 3\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1.0 / 3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\n<mask token>\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n 
array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-4": "<mask token>\nimport numpy as np\nfrom dipy.reconst.odf import OdfModel, OdfFit, gfa\nfrom dipy.reconst.cache import Cache\nimport warnings\nfrom dipy.reconst.multi_voxel import multi_voxel_fit\nfrom dipy.reconst.recspeed import local_maxima, remove_similar_vertices\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n\n def __init__(self, gtab, method='gqi2', sampling_length=1.2,\n normalize_peaks=False):\n \"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n self.gqi_vector = np.real(H(np.dot(self.model.b_vector,\n sphere.vertices.T) * self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(self.model.\n b_vector, sphere.vertices.T) * self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, 
self.gqi_vector)\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / x ** 3\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1.0 / 3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = np.sqrt((psi0 - psi1) ** 2 + (psi1 - psi2) ** 2 + (psi2 - psi0) ** 2\n ) / np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2))\n return t0, t1, t2, npa\n\n\ndef equatorial_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial zone conjugate\n to 'pole' with width half 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) <\n np.abs(np.sin(np.pi * width / 180))]\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2]) * v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' % (np.\n array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i for i, v in enumerate(vertices) if np.abs(np.dot(v, pole)) >\n np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = 
eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' % (np.\n array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p\n ])) for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-5": "\"\"\" Classes and functions for generalized q-sampling \"\"\"\nimport numpy as np\nfrom dipy.reconst.odf import OdfModel, OdfFit, gfa\nfrom dipy.reconst.cache import Cache\nimport warnings\nfrom dipy.reconst.multi_voxel import multi_voxel_fit\nfrom dipy.reconst.recspeed import local_maxima, remove_similar_vertices\n\n\nclass GeneralizedQSamplingModel(OdfModel, Cache):\n def __init__(self,\n gtab,\n method='gqi2',\n sampling_length=1.2,\n normalize_peaks=False):\n r\"\"\" Generalized Q-Sampling Imaging [1]_\n\n This model has the same assumptions as the DSI method i.e. Cartesian\n grid sampling in q-space and fast gradient switching.\n\n Implements equations 2.14 from [2]_ for standard GQI and equation 2.16\n from [2]_ for GQI2. You can think of GQI2 as an analytical solution of\n the DSI ODF.\n\n Parameters\n ----------\n gtab : object,\n GradientTable\n method : str,\n 'standard' or 'gqi2'\n sampling_length : float,\n diffusion sampling length (lambda in eq. 2.14 and 2.16)\n\n References\n ----------\n .. [1] Yeh F-C et al., \"Generalized Q-Sampling Imaging\", IEEE TMI, 2010\n\n .. [2] Garyfallidis E, \"Towards an accurate brain tractography\", PhD\n thesis, University of Cambridge, 2012.\n\n Notes\n -----\n As of version 0.9, range of the sampling length in GQI2 has changed\n to match the same scale used in the 'standard' method [1]_. This\n means that the value of `sampling_length` should be approximately\n 1 - 1.3 (see [1]_, pg. 
1628).\n\n Examples\n --------\n Here we create an example where we provide the data, a gradient table\n and a reconstruction sphere and calculate the ODF for the first\n voxel in the data.\n\n >>> from dipy.data import dsi_voxels\n >>> data, gtab = dsi_voxels()\n >>> from dipy.core.subdivide_octahedron import create_unit_sphere\n >>> sphere = create_unit_sphere(5)\n >>> from dipy.reconst.gqi import GeneralizedQSamplingModel\n >>> gq = GeneralizedQSamplingModel(gtab, 'gqi2', 1.1)\n >>> voxel_signal = data[0, 0, 0]\n >>> odf = gq.fit(voxel_signal).odf(sphere)\n\n See Also\n --------\n dipy.reconst.dsi.DiffusionSpectrumModel\n\n \"\"\"\n OdfModel.__init__(self, gtab)\n self.method = method\n self.Lambda = sampling_length\n self.normalize_peaks = normalize_peaks\n # 0.01506 = 6*D where D is the free water diffusion coefficient\n # l_values sqrt(6 D tau) D free water diffusion coefficient and\n # tau included in the b-value\n scaling = np.sqrt(self.gtab.bvals * 0.01506)\n tmp = np.tile(scaling, (3, 1))\n gradsT = self.gtab.bvecs.T\n b_vector = gradsT * tmp # element-wise product\n self.b_vector = b_vector.T\n\n @multi_voxel_fit\n def fit(self, data):\n return GeneralizedQSamplingFit(self, data)\n\n\nclass GeneralizedQSamplingFit(OdfFit):\n\n def __init__(self, model, data):\n \"\"\" Calculates PDF and ODF for a single voxel\n\n Parameters\n ----------\n model : object,\n DiffusionSpectrumModel\n data : 1d ndarray,\n signal values\n\n \"\"\"\n OdfFit.__init__(self, model, data)\n self._gfa = None\n self.npeaks = 5\n self._peak_values = None\n self._peak_indices = None\n self._qa = None\n\n def odf(self, sphere):\n \"\"\" Calculates the discrete ODF for a given discrete sphere.\n \"\"\"\n self.gqi_vector = self.model.cache_get('gqi_vector', key=sphere)\n if self.gqi_vector is None:\n if self.model.method == 'gqi2':\n H = squared_radial_component\n # print self.gqi_vector.shape\n self.gqi_vector = np.real(H(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n 
self.model.Lambda))\n if self.model.method == 'standard':\n self.gqi_vector = np.real(np.sinc(np.dot(\n self.model.b_vector, sphere.vertices.T) *\n self.model.Lambda / np.pi))\n self.model.cache_set('gqi_vector', sphere, self.gqi_vector)\n\n return np.dot(self.data, self.gqi_vector)\n\n\ndef normalize_qa(qa, max_qa=None):\n \"\"\" Normalize quantitative anisotropy.\n\n Used mostly with GQI rather than GQI2.\n\n Parameters\n ----------\n qa : array, shape (X, Y, Z, N)\n where N is the maximum number of peaks stored\n max_qa : float,\n maximum qa value. Usually found in the CSF (corticospinal fluid).\n\n Returns\n -------\n nqa : array, shape (x, Y, Z, N)\n normalized quantitative anisotropy\n\n Notes\n -----\n Normalized quantitative anisotropy has the very useful property\n to be very small near gray matter and background areas. Therefore,\n it can be used to mask out white matter areas.\n\n \"\"\"\n if max_qa is None:\n return qa / qa.max()\n return qa / max_qa\n\n\ndef squared_radial_component(x, tol=0.01):\n \"\"\" Part of the GQI2 integral\n\n Eq.8 in the referenced paper by Yeh et al. 2010\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = (2 * x * np.cos(x) + (x * x - 2) * np.sin(x)) / (x ** 3)\n x_near_zero = (x < tol) & (x > -tol)\n return np.where(x_near_zero, 1./3, result)\n\n\ndef npa(self, odf, width=5):\n \"\"\" non-parametric anisotropy\n\n Nimmo-Smith et al. 
ISMRM 2011\n \"\"\"\n # odf = self.odf(s)\n t0, t1, t2 = triple_odf_maxima(self.odf_vertices, odf, width)\n psi0 = t0[1] ** 2\n psi1 = t1[1] ** 2\n psi2 = t2[1] ** 2\n npa = (np.sqrt(\n (psi0 - psi1) ** 2 +\n (psi1 - psi2) ** 2 +\n (psi2 - psi0) ** 2) /\n np.sqrt(2 * (psi0 ** 2 + psi1 ** 2 + psi2 ** 2)))\n # print 'tom >>>> ',t0,t1,t2,npa\n\n return t0, t1, t2, npa\n\n\ndef equatorial_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial zone conjugate\n to 'pole' with width half 'width' degrees\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi * width / 180))]\n\n\ndef polar_zone_vertices(vertices, pole, width=5):\n \"\"\"\n finds the 'vertices' in the equatorial band around\n the 'pole' of radius 'width' degrees\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]\n\n\ndef upper_hemi_map(v):\n \"\"\"\n maps a 3-vector into the z-upper hemisphere\n \"\"\"\n return np.sign(v[2])*v\n\n\ndef equatorial_maximum(vertices, odf, pole, width):\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty equatorial band at %s pole with width %f' %\n (np.array_str(pole), width))\n return None, None\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n\n return eqvertmax, eqvalmax\n\n\ndef patch_vertices(vertices, pole, width):\n \"\"\"\n find 'vertices' within the cone of 'width' degrees around 'pole'\n \"\"\"\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) > np.abs(np.cos(np.pi * width / 180))]\n\n\ndef patch_maximum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' %\n 
(np.array_str(pole), width))\n return np.Null, np.Null\n eqvals = [odf[i] for i in eqvert]\n eqargmax = np.argmax(eqvals)\n eqvertmax = eqvert[eqargmax]\n eqvalmax = eqvals[eqargmax]\n return eqvertmax, eqvalmax\n\n\ndef odf_sum(odf):\n return np.sum(odf)\n\n\ndef patch_sum(vertices, odf, pole, width):\n eqvert = patch_vertices(vertices, pole, width)\n # need to test for whether eqvert is empty or not\n if len(eqvert) == 0:\n print('empty cone around pole %s with with width %f' %\n (np.array_str(pole), width))\n return np.Null\n return np.sum([odf[i] for i in eqvert])\n\n\ndef triple_odf_maxima(vertices, odf, width):\n\n indmax1 = np.argmax([odf[i] for i, v in enumerate(vertices)])\n odfmax1 = odf[indmax1]\n pole = vertices[indmax1]\n eqvert = equatorial_zone_vertices(vertices, pole, width)\n indmax2, odfmax2 = equatorial_maximum(vertices, odf, pole, width)\n indmax3 = eqvert[np.argmin([np.abs(np.dot(vertices[indmax2], vertices[p]))\n for p in eqvert])]\n odfmax3 = odf[indmax3]\n \"\"\"\n cross12 = np.cross(vertices[indmax1],vertices[indmax2])\n cross12 = cross12/np.sqrt(np.sum(cross12**2))\n indmax3, odfmax3 = patch_maximum(vertices, odf, cross12, 2*width)\n \"\"\"\n return [(indmax1, odfmax1), (indmax2, odfmax2), (indmax3, odfmax3)]\n",
"step-ids": [
14,
16,
17,
19,
20
]
}
|
[
14,
16,
17,
19,
20
] |
import docker
import logging
import sys
if __name__ == '__main__':
# setting up logger
logging.basicConfig(stream=sys.stdout,
format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
level=logging.DEBUG)
# get the docker client
client = docker.from_env()
# list out docker volumes
logging.info(str([x.name for x in client.volumes.list()]))
# Check if airflow backend volume is created or not
# if the volume is not created then create it
if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:
client.volumes.create('airflow_pg_data')
# kill container if it is already running
logging.info(str([x.name for x in client.containers.list()]))
if 'airflow_pg' not in [x.name for x in client.containers.list()]:
# launch postgres backend
pg = client.containers.run(image='postgres',
name='airflow_pg',
auto_remove=True,
detach=True,
environment={
'POSTGRES_PASSWORD': 'airflow',
'POSTGRES_USER': 'airflow',
'PGDATA': '/airflow/data'
},
volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},
ports={'5432/tcp': 5432}
)
|
normal
|
{
"blob_id": "a5c9ff1fe250310216e2eaa7a6ff5cc76fc10f94",
"index": 4324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n",
"step-3": "import docker\nimport logging\nimport sys\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n",
"step-4": "import docker\nimport logging\nimport sys\n\nif __name__ == '__main__':\n\n # setting up logger\n logging.basicConfig(stream=sys.stdout,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n\n # get the docker client\n client = docker.from_env()\n\n # list out docker volumes\n logging.info(str([x.name for x in client.volumes.list()]))\n\n # Check if airflow backend volume is created or not\n # if the volume is not created then create it\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n\n # kill container if it is already running\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n\n # launch postgres backend\n pg = client.containers.run(image='postgres',\n name='airflow_pg',\n auto_remove=True,\n detach=True,\n environment={\n 'POSTGRES_PASSWORD': 'airflow',\n 'POSTGRES_USER': 'airflow',\n 'PGDATA': '/airflow/data'\n },\n volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},\n ports={'5432/tcp': 5432}\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Images(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
image = models.CharField(max_length=255, null=True, blank=True,
verbose_name='图片链接')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '百度图片'
class Youkus(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
video = models.CharField(max_length=255, null=True, blank=True,
verbose_name='视频分享地址')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '优酷视频'
class Tags(models.Model):
name = models.CharField(max_length=255, verbose_name='标签名称')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '标签管理'
class TagMaps(models.Model):
article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
name = models.CharField(max_length=255, verbose_name='爬取网站')
url = models.CharField(max_length=255, verbose_name='爬取链接')
search_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='搜索爬取规则')
title_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='标题爬取规则')
content_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='内容爬取规则')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '链接管理'
class ScrapyLogs(models.Model):
url = models.CharField(max_length=255, verbose_name='爬取链接')
title = models.CharField(max_length=255, null=True, blank=True,
verbose_name='页面标题')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '爬虫日志'
class ScrapyTasks(models.Model):
SHIRT_SIZES = ('0', '否'), ('1', '是')
CYCLE_TYPE = ('0', '按指定日期时间执行'), ('1', '按指定时间每天执行'), ('2', '按指定时间每周执行'), (
'3', '按指定时间每月执行')
start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')
start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')
task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE,
verbose_name='定时方式')
is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,
verbose_name='是否包含已采集')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '爬虫任务'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Articles(models.Model):
wordroot = models.CharField(max_length=255, verbose_name='词根')
title = models.CharField(max_length=255, verbose_name='标题')
content = models.TextField(null=True, blank=True, verbose_name='内容组合')
category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,
verbose_name='分类名称')
image = models.ImageField(max_length=255, null=True, blank=True,
verbose_name='图片')
video = models.FileField(max_length=255, null=True, blank=True,
verbose_name='视频')
question = models.CharField(max_length=255, null=True, blank=True,
verbose_name='问答问题')
answer = models.TextField(null=True, blank=True, verbose_name='问答回答')
baike_title = models.CharField(max_length=255, null=True, blank=True,
verbose_name='百科标题')
baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容'
)
seo_keywords = models.CharField(max_length=255, null=True, blank=True,
verbose_name='关键词')
seo_description = models.CharField(max_length=255, null=True, blank=
True, verbose_name='描述')
click_count = models.IntegerField(default=0, verbose_name='点击')
order = models.IntegerField(default=0, verbose_name='排序')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '文章管理'
class Baikes(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
title = models.CharField(max_length=255, null=True, blank=True,
verbose_name='标题')
content = models.TextField(null=True, blank=True, verbose_name='内容')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '百度百科'
class Zhidaos(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
title = models.CharField(max_length=255, null=True, blank=True,
verbose_name='标题')
content = models.TextField(null=True, blank=True, verbose_name='内容')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '百度知道'
class Images(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
image = models.CharField(max_length=255, null=True, blank=True,
verbose_name='图片链接')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '百度图片'
class Youkus(models.Model):
wordroot_text = models.CharField(max_length=255, verbose_name='词根')
wordroot_id = models.IntegerField(default=0, null=True, blank=True,
verbose_name='词根id, 可空')
url = models.CharField(max_length=255, null=True, blank=True,
verbose_name='爬取链接')
video = models.CharField(max_length=255, null=True, blank=True,
verbose_name='视频分享地址')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '优酷视频'
class Tags(models.Model):
name = models.CharField(max_length=255, verbose_name='标签名称')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '标签管理'
class TagMaps(models.Model):
article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
name = models.CharField(max_length=255, verbose_name='爬取网站')
url = models.CharField(max_length=255, verbose_name='爬取链接')
search_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='搜索爬取规则')
title_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='标题爬取规则')
content_rules = models.CharField(max_length=255, null=True, blank=True,
verbose_name='内容爬取规则')
created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'
)
updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'
)
class Meta:
verbose_name_plural = '链接管理'
class ScrapyLogs(models.Model):
    """One log row per crawled URL."""

    url = models.CharField(max_length=255, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='页面标题')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫日志'
class ScrapyTasks(models.Model):
    """A scheduled scraping task: a start date/time plus a recurrence cycle.

    Bug fix: ``task_cycle`` and ``is_done`` are IntegerFields, so the choice
    keys must be ints.  The original declared them as strings ('0', '1', ...),
    which made ``default=0`` an invalid choice and caused Django model/form
    validation to reject every value.
    """

    SHIRT_SIZES = ((0, '否'), (1, '是'))
    CYCLE_TYPE = (
        (0, '按指定日期时间执行'),
        (1, '按指定时间每天执行'),
        (2, '按指定时间每周执行'),
        (3, '按指定时间每月执行'),
    )
    start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')
    # NOTE(review): timezone.now returns a datetime, not a time; TimeField
    # appears to rely on Django coercing it -- confirm and consider a
    # dedicated time-returning callable.
    start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')
    task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name='定时方式')
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name='是否包含已采集')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫任务'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Wordroots(models.Model):
    """A keyword ("word root") queued for scraping."""

    # is_done flag: 0 = not yet scraped, 1 = scraped.
    SHIRT_SIZES = ((0, '否'), (1, '是'))
    name = models.CharField(max_length=255, verbose_name='词语')
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name='是否采集')
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name='分类名称')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '词库管理'
class Articles(models.Model):
    """An article assembled from the scraped Q&A, baike and media pieces."""

    wordroot = models.CharField(max_length=255, verbose_name='词根')
    title = models.CharField(max_length=255, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容组合')
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name='分类名称')
    image = models.ImageField(max_length=255, null=True, blank=True, verbose_name='图片')
    video = models.FileField(max_length=255, null=True, blank=True, verbose_name='视频')
    question = models.CharField(max_length=255, null=True, blank=True, verbose_name='问答问题')
    answer = models.TextField(null=True, blank=True, verbose_name='问答回答')
    baike_title = models.CharField(max_length=255, null=True, blank=True, verbose_name='百科标题')
    baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容')
    seo_keywords = models.CharField(max_length=255, null=True, blank=True, verbose_name='关键词')
    seo_description = models.CharField(max_length=255, null=True, blank=True, verbose_name='描述')
    click_count = models.IntegerField(default=0, verbose_name='点击')
    order = models.IntegerField(default=0, verbose_name='排序')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '文章管理'
class Baikes(models.Model):
    """A Baidu Baike (encyclopedia) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度百科'
class Zhidaos(models.Model):
    """A Baidu Zhidao (Q&A) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度知道'
class Images(models.Model):
    """A Baidu image hit scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    image = models.CharField(max_length=255, null=True, blank=True, verbose_name='图片链接')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度图片'
class Youkus(models.Model):
    """Youku video result scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    video = models.CharField(max_length=255, null=True, blank=True, verbose_name='视频分享地址')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '优酷视频'
class Tags(models.Model):
    """A content tag that can be attached to articles via TagMaps."""

    name = models.CharField(max_length=255, verbose_name='标签名称')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '标签管理'
class TagMaps(models.Model):
    # Hand-rolled many-to-many join table between Articles and Tags.
    article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
    tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
    """Per-site scraping configuration: entry URL plus extraction rules."""

    name = models.CharField(max_length=255, verbose_name='爬取网站')
    url = models.CharField(max_length=255, verbose_name='爬取链接')
    search_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='搜索爬取规则')
    title_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题爬取规则')
    content_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='内容爬取规则')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '链接管理'
class ScrapyLogs(models.Model):
    """One log row per crawled URL."""

    url = models.CharField(max_length=255, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='页面标题')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫日志'
class ScrapyTasks(models.Model):
    """A scheduled scraping task: a start date/time plus a recurrence cycle.

    Bug fix: ``task_cycle`` and ``is_done`` are IntegerFields, so the choice
    keys must be ints.  The original declared them as strings ('0', '1', ...),
    which made ``default=0`` an invalid choice and caused Django model/form
    validation to reject every value.
    """

    SHIRT_SIZES = ((0, '否'), (1, '是'))
    CYCLE_TYPE = (
        (0, '按指定日期时间执行'),
        (1, '按指定时间每天执行'),
        (2, '按指定时间每周执行'),
        (3, '按指定时间每月执行'),
    )
    start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')
    # NOTE(review): timezone.now returns a datetime, not a time; TimeField
    # appears to rely on Django coercing it -- confirm and consider a
    # dedicated time-returning callable.
    start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')
    task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name='定时方式')
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name='是否包含已采集')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫任务'
<|reserved_special_token_1|>
from django.db import models
import django.utils.timezone as timezone
class Categories(models.Model):
    """Article category; self-referencing to form a parent/child tree."""

    name = models.CharField(max_length=200, verbose_name='分类名称')
    # Self FK; nullable so top-level categories can have no parent.
    parent = models.ForeignKey('self', default=0, on_delete=models.DO_NOTHING, null=True, blank=True, verbose_name='上级分类')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = '分类管理'
class Wordroots(models.Model):
    """A keyword ("word root") queued for scraping."""

    # is_done flag: 0 = not yet scraped, 1 = scraped.
    SHIRT_SIZES = ((0, '否'), (1, '是'))
    name = models.CharField(max_length=255, verbose_name='词语')
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name='是否采集')
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name='分类名称')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '词库管理'
class Articles(models.Model):
    """An article assembled from the scraped Q&A, baike and media pieces."""

    wordroot = models.CharField(max_length=255, verbose_name='词根')
    title = models.CharField(max_length=255, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容组合')
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name='分类名称')
    image = models.ImageField(max_length=255, null=True, blank=True, verbose_name='图片')
    video = models.FileField(max_length=255, null=True, blank=True, verbose_name='视频')
    question = models.CharField(max_length=255, null=True, blank=True, verbose_name='问答问题')
    answer = models.TextField(null=True, blank=True, verbose_name='问答回答')
    baike_title = models.CharField(max_length=255, null=True, blank=True, verbose_name='百科标题')
    baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容')
    seo_keywords = models.CharField(max_length=255, null=True, blank=True, verbose_name='关键词')
    seo_description = models.CharField(max_length=255, null=True, blank=True, verbose_name='描述')
    click_count = models.IntegerField(default=0, verbose_name='点击')
    order = models.IntegerField(default=0, verbose_name='排序')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '文章管理'
class Baikes(models.Model):
    """A Baidu Baike (encyclopedia) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度百科'
class Zhidaos(models.Model):
    """A Baidu Zhidao (Q&A) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题')
    content = models.TextField(null=True, blank=True, verbose_name='内容')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度知道'
class Images(models.Model):
    """A Baidu image hit scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    image = models.CharField(max_length=255, null=True, blank=True, verbose_name='图片链接')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '百度图片'
class Youkus(models.Model):
    """Youku video result scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name='词根')
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name='词根id, 可空')
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name='爬取链接')
    video = models.CharField(max_length=255, null=True, blank=True, verbose_name='视频分享地址')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '优酷视频'
class Tags(models.Model):
    """A content tag that can be attached to articles via TagMaps."""

    name = models.CharField(max_length=255, verbose_name='标签名称')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '标签管理'
class TagMaps(models.Model):
    # Hand-rolled many-to-many join table between Articles and Tags.
    article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
    tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
    """Per-site scraping configuration: entry URL plus extraction rules."""

    name = models.CharField(max_length=255, verbose_name='爬取网站')
    url = models.CharField(max_length=255, verbose_name='爬取链接')
    search_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='搜索爬取规则')
    title_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='标题爬取规则')
    content_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name='内容爬取规则')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '链接管理'
class ScrapyLogs(models.Model):
    """One log row per crawled URL."""

    url = models.CharField(max_length=255, verbose_name='爬取链接')
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name='页面标题')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫日志'
class ScrapyTasks(models.Model):
    """A scheduled scraping task: a start date/time plus a recurrence cycle.

    Bug fix: ``task_cycle`` and ``is_done`` are IntegerFields, so the choice
    keys must be ints.  The original declared them as strings ('0', '1', ...),
    which made ``default=0`` an invalid choice and caused Django model/form
    validation to reject every value.
    """

    SHIRT_SIZES = ((0, '否'), (1, '是'))
    CYCLE_TYPE = (
        (0, '按指定日期时间执行'),
        (1, '按指定时间每天执行'),
        (2, '按指定时间每周执行'),
        (3, '按指定时间每月执行'),
    )
    start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')
    # NOTE(review): timezone.now returns a datetime, not a time; TimeField
    # appears to rely on Django coercing it -- confirm and consider a
    # dedicated time-returning callable.
    start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')
    task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name='定时方式')
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name='是否包含已采集')
    created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期')
    updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期')

    class Meta:
        verbose_name_plural = '爬虫任务'
<|reserved_special_token_1|>
from django.db import models
import django.utils.timezone as timezone
# Create your models here.
# Create your models here.
class Categories(models.Model):
    """Article category; self-referencing to form a parent/child tree."""

    name = models.CharField(max_length=200, verbose_name="分类名称")
    # Self FK; nullable so top-level categories can have no parent.
    parent = models.ForeignKey('self', default=0, on_delete=models.DO_NOTHING, null=True, blank=True, verbose_name="上级分类")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = '分类管理'
class Wordroots(models.Model):
    """A keyword ("word root") queued for scraping."""

    # is_done flag: 0 = not yet scraped, 1 = scraped.
    SHIRT_SIZES = ((0, '否'), (1, '是'))
    name = models.CharField(max_length=255, verbose_name="词语")
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name="是否采集")
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name="分类名称")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '词库管理'
class Articles(models.Model):
    """An article assembled from the scraped Q&A, baike and media pieces."""

    wordroot = models.CharField(max_length=255, verbose_name="词根")
    title = models.CharField(max_length=255, verbose_name="标题")
    content = models.TextField(null=True, blank=True, verbose_name="内容组合")
    category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name="分类名称")
    image = models.ImageField(max_length=255, null=True, blank=True, verbose_name="图片")
    video = models.FileField(max_length=255, null=True, blank=True, verbose_name="视频")
    question = models.CharField(max_length=255, null=True, blank=True, verbose_name="问答问题")
    answer = models.TextField(null=True, blank=True, verbose_name="问答回答")
    baike_title = models.CharField(max_length=255, null=True, blank=True, verbose_name="百科标题")
    baike_content = models.TextField(null=True, blank=True, verbose_name="百科内容")
    seo_keywords = models.CharField(max_length=255, null=True, blank=True, verbose_name="关键词")
    seo_description = models.CharField(max_length=255, null=True, blank=True, verbose_name="描述")
    click_count = models.IntegerField(default=0, verbose_name="点击")
    order = models.IntegerField(default=0, verbose_name="排序")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '文章管理'
class Baikes(models.Model):
    """A Baidu Baike (encyclopedia) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name="词根")
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name="词根id, 可空")
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name="爬取链接")
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name="标题")
    content = models.TextField(null=True, blank=True, verbose_name="内容")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '百度百科'
class Zhidaos(models.Model):
    """A Baidu Zhidao (Q&A) entry scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name="词根")
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name="词根id, 可空")
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name="爬取链接")
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name="标题")
    content = models.TextField(null=True, blank=True, verbose_name="内容")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '百度知道'
class Images(models.Model):
    """A Baidu image hit scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name="词根")
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name="词根id, 可空")
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name="爬取链接")
    image = models.CharField(max_length=255, null=True, blank=True, verbose_name="图片链接")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '百度图片'
class Youkus(models.Model):
    """Youku video result scraped for a word root."""

    wordroot_text = models.CharField(max_length=255, verbose_name="词根")
    wordroot_id = models.IntegerField(default=0, null=True, blank=True, verbose_name="词根id, 可空")
    url = models.CharField(max_length=255, null=True, blank=True, verbose_name="爬取链接")
    video = models.CharField(max_length=255, null=True, blank=True, verbose_name="视频分享地址")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '优酷视频'
class Tags(models.Model):
    """A content tag that can be attached to articles via TagMaps."""

    name = models.CharField(max_length=255, verbose_name="标签名称")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = '标签管理'
class TagMaps(models.Model):
    # Hand-rolled many-to-many join table between Articles and Tags.
    article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)
    tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)
class ScrapyRules(models.Model):
    """Per-site scraping configuration: entry URL plus extraction rules."""

    name = models.CharField(max_length=255, verbose_name="爬取网站")
    url = models.CharField(max_length=255, verbose_name="爬取链接")
    search_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name="搜索爬取规则")
    title_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name="标题爬取规则")
    content_rules = models.CharField(max_length=255, null=True, blank=True, verbose_name="内容爬取规则")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = "链接管理"
class ScrapyLogs(models.Model):
    """One log row per crawled URL."""

    url = models.CharField(max_length=255, verbose_name="爬取链接")
    title = models.CharField(max_length=255, null=True, blank=True, verbose_name="页面标题")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = "爬虫日志"
class ScrapyTasks(models.Model):
    """A scheduled scraping task: a start date/time plus a recurrence cycle.

    Bug fix: ``task_cycle`` and ``is_done`` are IntegerFields, so the choice
    keys must be ints.  The original declared them as strings ('0', '1', ...),
    which made ``default=0`` an invalid choice and caused Django model/form
    validation to reject every value.
    """

    SHIRT_SIZES = ((0, '否'), (1, '是'))
    CYCLE_TYPE = (
        (0, '按指定日期时间执行'),
        (1, '按指定时间每天执行'),
        (2, '按指定时间每周执行'),
        (3, '按指定时间每月执行'),
    )
    start_date_at = models.DateField(default=timezone.now, verbose_name="开始时间")
    # NOTE(review): timezone.now returns a datetime, not a time; TimeField
    # appears to rely on Django coercing it -- confirm and consider a
    # dedicated time-returning callable.
    start_time_at = models.TimeField(default=timezone.now, verbose_name="开始时间")
    task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name="定时方式")
    is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name="是否包含已采集")
    created_at = models.DateTimeField(default=timezone.now, verbose_name="添加日期")
    updated_at = models.DateTimeField(default=timezone.now, verbose_name="修改日期")

    class Meta:
        verbose_name_plural = "爬虫任务"
|
flexible
|
{
"blob_id": "512a13084a860e2784020664a3d5824d9dace6db",
"index": 7764,
"step-1": "<mask token>\n\n\nclass Images(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n image = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='图片链接')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度图片'\n\n\nclass Youkus(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n video = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='视频分享地址')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '优酷视频'\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=255, verbose_name='标签名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '标签管理'\n\n\nclass TagMaps(models.Model):\n article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)\n tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)\n\n\nclass ScrapyRules(models.Model):\n name = models.CharField(max_length=255, verbose_name='爬取网站')\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n search_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='搜索爬取规则')\n title_rules = models.CharField(max_length=255, null=True, 
blank=True,\n verbose_name='标题爬取规则')\n content_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='内容爬取规则')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '链接管理'\n\n\nclass ScrapyLogs(models.Model):\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='页面标题')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫日志'\n\n\nclass ScrapyTasks(models.Model):\n SHIRT_SIZES = ('0', '否'), ('1', '是')\n CYCLE_TYPE = ('0', '按指定日期时间执行'), ('1', '按指定时间每天执行'), ('2', '按指定时间每周执行'), (\n '3', '按指定时间每月执行')\n start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')\n start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')\n task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE,\n verbose_name='定时方式')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否包含已采集')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫任务'\n",
"step-2": "<mask token>\n\n\nclass Articles(models.Model):\n wordroot = models.CharField(max_length=255, verbose_name='词根')\n title = models.CharField(max_length=255, verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容组合')\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,\n verbose_name='分类名称')\n image = models.ImageField(max_length=255, null=True, blank=True,\n verbose_name='图片')\n video = models.FileField(max_length=255, null=True, blank=True,\n verbose_name='视频')\n question = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='问答问题')\n answer = models.TextField(null=True, blank=True, verbose_name='问答回答')\n baike_title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='百科标题')\n baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容'\n )\n seo_keywords = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='关键词')\n seo_description = models.CharField(max_length=255, null=True, blank=\n True, verbose_name='描述')\n click_count = models.IntegerField(default=0, verbose_name='点击')\n order = models.IntegerField(default=0, verbose_name='排序')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '文章管理'\n\n\nclass Baikes(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, 
verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度百科'\n\n\nclass Zhidaos(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度知道'\n\n\nclass Images(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n image = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='图片链接')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度图片'\n\n\nclass Youkus(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n video = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='视频分享地址')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '优酷视频'\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=255, 
verbose_name='标签名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '标签管理'\n\n\nclass TagMaps(models.Model):\n article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)\n tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)\n\n\nclass ScrapyRules(models.Model):\n name = models.CharField(max_length=255, verbose_name='爬取网站')\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n search_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='搜索爬取规则')\n title_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题爬取规则')\n content_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='内容爬取规则')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '链接管理'\n\n\nclass ScrapyLogs(models.Model):\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='页面标题')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫日志'\n\n\nclass ScrapyTasks(models.Model):\n SHIRT_SIZES = ('0', '否'), ('1', '是')\n CYCLE_TYPE = ('0', '按指定日期时间执行'), ('1', '按指定时间每天执行'), ('2', '按指定时间每周执行'), (\n '3', '按指定时间每月执行')\n start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')\n start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')\n task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE,\n verbose_name='定时方式')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否包含已采集')\n created_at = 
models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫任务'\n",
"step-3": "<mask token>\n\n\nclass Wordroots(models.Model):\n SHIRT_SIZES = (0, '否'), (1, '是')\n name = models.CharField(max_length=255, verbose_name='词语')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否采集')\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,\n verbose_name='分类名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '词库管理'\n\n\nclass Articles(models.Model):\n wordroot = models.CharField(max_length=255, verbose_name='词根')\n title = models.CharField(max_length=255, verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容组合')\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,\n verbose_name='分类名称')\n image = models.ImageField(max_length=255, null=True, blank=True,\n verbose_name='图片')\n video = models.FileField(max_length=255, null=True, blank=True,\n verbose_name='视频')\n question = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='问答问题')\n answer = models.TextField(null=True, blank=True, verbose_name='问答回答')\n baike_title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='百科标题')\n baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容'\n )\n seo_keywords = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='关键词')\n seo_description = models.CharField(max_length=255, null=True, blank=\n True, verbose_name='描述')\n click_count = models.IntegerField(default=0, verbose_name='点击')\n order = models.IntegerField(default=0, verbose_name='排序')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '文章管理'\n\n\nclass Baikes(models.Model):\n wordroot_text = 
models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度百科'\n\n\nclass Zhidaos(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度知道'\n\n\nclass Images(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n image = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='图片链接')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度图片'\n\n\nclass Youkus(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = 
models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n video = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='视频分享地址')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '优酷视频'\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=255, verbose_name='标签名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '标签管理'\n\n\nclass TagMaps(models.Model):\n article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)\n tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)\n\n\nclass ScrapyRules(models.Model):\n name = models.CharField(max_length=255, verbose_name='爬取网站')\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n search_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='搜索爬取规则')\n title_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题爬取规则')\n content_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='内容爬取规则')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '链接管理'\n\n\nclass ScrapyLogs(models.Model):\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='页面标题')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n 
verbose_name_plural = '爬虫日志'\n\n\nclass ScrapyTasks(models.Model):\n SHIRT_SIZES = ('0', '否'), ('1', '是')\n CYCLE_TYPE = ('0', '按指定日期时间执行'), ('1', '按指定时间每天执行'), ('2', '按指定时间每周执行'), (\n '3', '按指定时间每月执行')\n start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')\n start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')\n task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE,\n verbose_name='定时方式')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否包含已采集')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫任务'\n",
"step-4": "from django.db import models\nimport django.utils.timezone as timezone\n\n\nclass Categories(models.Model):\n name = models.CharField(max_length=200, verbose_name='分类名称')\n parent = models.ForeignKey('self', default=0, on_delete=models.\n DO_NOTHING, null=True, blank=True, verbose_name='上级分类')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name_plural = '分类管理'\n\n\nclass Wordroots(models.Model):\n SHIRT_SIZES = (0, '否'), (1, '是')\n name = models.CharField(max_length=255, verbose_name='词语')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否采集')\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,\n verbose_name='分类名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '词库管理'\n\n\nclass Articles(models.Model):\n wordroot = models.CharField(max_length=255, verbose_name='词根')\n title = models.CharField(max_length=255, verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容组合')\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING,\n verbose_name='分类名称')\n image = models.ImageField(max_length=255, null=True, blank=True,\n verbose_name='图片')\n video = models.FileField(max_length=255, null=True, blank=True,\n verbose_name='视频')\n question = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='问答问题')\n answer = models.TextField(null=True, blank=True, verbose_name='问答回答')\n baike_title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='百科标题')\n baike_content = models.TextField(null=True, blank=True, verbose_name='百科内容'\n )\n seo_keywords = models.CharField(max_length=255, null=True, 
blank=True,\n verbose_name='关键词')\n seo_description = models.CharField(max_length=255, null=True, blank=\n True, verbose_name='描述')\n click_count = models.IntegerField(default=0, verbose_name='点击')\n order = models.IntegerField(default=0, verbose_name='排序')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '文章管理'\n\n\nclass Baikes(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度百科'\n\n\nclass Zhidaos(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题')\n content = models.TextField(null=True, blank=True, verbose_name='内容')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度知道'\n\n\nclass Images(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = 
models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n image = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='图片链接')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '百度图片'\n\n\nclass Youkus(models.Model):\n wordroot_text = models.CharField(max_length=255, verbose_name='词根')\n wordroot_id = models.IntegerField(default=0, null=True, blank=True,\n verbose_name='词根id, 可空')\n url = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='爬取链接')\n video = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='视频分享地址')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '优酷视频'\n\n\nclass Tags(models.Model):\n name = models.CharField(max_length=255, verbose_name='标签名称')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '标签管理'\n\n\nclass TagMaps(models.Model):\n article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)\n tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)\n\n\nclass ScrapyRules(models.Model):\n name = models.CharField(max_length=255, verbose_name='爬取网站')\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n search_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='搜索爬取规则')\n title_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='标题爬取规则')\n content_rules = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='内容爬取规则')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = 
models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '链接管理'\n\n\nclass ScrapyLogs(models.Model):\n url = models.CharField(max_length=255, verbose_name='爬取链接')\n title = models.CharField(max_length=255, null=True, blank=True,\n verbose_name='页面标题')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫日志'\n\n\nclass ScrapyTasks(models.Model):\n SHIRT_SIZES = ('0', '否'), ('1', '是')\n CYCLE_TYPE = ('0', '按指定日期时间执行'), ('1', '按指定时间每天执行'), ('2', '按指定时间每周执行'), (\n '3', '按指定时间每月执行')\n start_date_at = models.DateField(default=timezone.now, verbose_name='开始时间')\n start_time_at = models.TimeField(default=timezone.now, verbose_name='开始时间')\n task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE,\n verbose_name='定时方式')\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES,\n verbose_name='是否包含已采集')\n created_at = models.DateTimeField(default=timezone.now, verbose_name='添加日期'\n )\n updated_at = models.DateTimeField(default=timezone.now, verbose_name='修改日期'\n )\n\n\n class Meta:\n verbose_name_plural = '爬虫任务'\n",
"step-5": "from django.db import models\nimport django.utils.timezone as timezone\n\n# Create your models here.\n\n# Create your models here.\nclass Categories(models.Model):\n # 文章分类\n name = models.CharField(max_length=200, verbose_name = \"分类名称\")\n parent = models.ForeignKey('self', default=0, on_delete=models.DO_NOTHING, null = True, blank = True, verbose_name = \"上级分类\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name_plural = '分类管理'\n\n\nclass Wordroots(models.Model):\n # 爬取的词\n SHIRT_SIZES = (\n (0, '否'),\n (1, '是'),\n )\n name = models.CharField(max_length=255, verbose_name = \"词语\")\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name = \"是否采集\")\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name = \"分类名称\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n\n class Meta:\n verbose_name_plural = '词库管理'\n\n\nclass Articles(models.Model):\n # 抓取的数据组合的文章\n wordroot = models.CharField(max_length=255, verbose_name = \"词根\")\n title = models.CharField(max_length=255, verbose_name = \"标题\")\n content = models.TextField(null = True, blank = True, verbose_name = \"内容组合\")\n category = models.ForeignKey(Categories, on_delete=models.DO_NOTHING, verbose_name = \"分类名称\")\n image = models.ImageField(max_length=255, null = True, blank = True, verbose_name = \"图片\")\n video = models.FileField(max_length=255, null = True, blank = True, verbose_name = \"视频\")\n question = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"问答问题\")\n answer = models.TextField(null = True, blank = True, verbose_name = \"问答回答\")\n baike_title = models.CharField(max_length=255, null = True, blank = 
True, verbose_name = \"百科标题\")\n baike_content = models.TextField(null = True, blank = True, verbose_name = \"百科内容\")\n seo_keywords = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"关键词\")\n seo_description = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"描述\")\n click_count = models.IntegerField(default=0, verbose_name = \"点击\")\n order = models.IntegerField(default=0, verbose_name = \"排序\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '文章管理'\n\n \nclass Baikes(models.Model):\n # 百科\n wordroot_text = models.CharField(max_length=255, verbose_name = \"词根\")\n wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = \"词根id, 可空\")\n url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"爬取链接\")\n title = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"标题\")\n content = models.TextField(null = True, blank = True, verbose_name = \"内容\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '百度百科'\n\n\nclass Zhidaos(models.Model):\n # 知道\n wordroot_text = models.CharField(max_length=255, verbose_name = \"词根\")\n wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = \"词根id, 可空\")\n url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"爬取链接\")\n title = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"标题\")\n content = models.TextField(null = True, blank = True, verbose_name = \"内容\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = 
timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '百度知道'\n\n\nclass Images(models.Model):\n # 图片\n wordroot_text = models.CharField(max_length=255, verbose_name = \"词根\")\n wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = \"词根id, 可空\")\n url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"爬取链接\")\n image = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"图片链接\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '百度图片'\n\n\nclass Youkus(models.Model):\n # 优酷\n wordroot_text = models.CharField(max_length=255, verbose_name = \"词根\")\n wordroot_id = models.IntegerField(default=0, null = True, blank = True, verbose_name = \"词根id, 可空\")\n url = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"爬取链接\")\n video = models.CharField(max_length=255, null = True, blank = True, verbose_name = \"视频分享地址\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '优酷视频'\n\nclass Tags(models.Model):\n #标签\n name = models.CharField(max_length=255, verbose_name = \"标签名称\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = '标签管理'\n\nclass TagMaps(models.Model):\n article = models.ForeignKey(Articles, on_delete=models.DO_NOTHING)\n tag = models.ForeignKey(Tags, on_delete=models.DO_NOTHING)\n\n\nclass ScrapyRules(models.Model):\n # 爬虫链接及规则\n name = models.CharField(max_length = 255, verbose_name = \"爬取网站\")\n url = models.CharField(max_length = 255, verbose_name = 
\"爬取链接\")\n search_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"搜索爬取规则\")\n title_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"标题爬取规则\")\n content_rules = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"内容爬取规则\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = \"链接管理\"\n\n\nclass ScrapyLogs(models.Model):\n # 爬虫日志\n url = models.CharField(max_length = 255, verbose_name = \"爬取链接\")\n title = models.CharField(max_length = 255, null = True, blank = True, verbose_name = \"页面标题\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = \"爬虫日志\"\n\n\nclass ScrapyTasks(models.Model):\n # 定时任务\n # 爬取的词\n SHIRT_SIZES = (\n ('0', '否'),\n ('1', '是'),\n )\n CYCLE_TYPE = (\n ('0', '按指定日期时间执行'),\n ('1', '按指定时间每天执行'),\n ('2', '按指定时间每周执行'),\n ('3', '按指定时间每月执行'),\n )\n start_date_at = models.DateField(default = timezone.now, verbose_name = \"开始时间\")\n start_time_at = models.TimeField(default = timezone.now, verbose_name = \"开始时间\")\n task_cycle = models.IntegerField(default=0, choices=CYCLE_TYPE, verbose_name = \"定时方式\")\n is_done = models.IntegerField(default=0, choices=SHIRT_SIZES, verbose_name = \"是否包含已采集\")\n created_at = models.DateTimeField(default = timezone.now, verbose_name = \"添加日期\")\n updated_at = models.DateTimeField(default = timezone.now, verbose_name = \"修改日期\")\n\n class Meta:\n verbose_name_plural = \"爬虫任务\"\n\n",
"step-ids": [
14,
20,
22,
26,
27
]
}
|
[
14,
20,
22,
26,
27
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = create_app(Config)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
<|reserved_special_token_1|>
from app import create_app
from app.config import Config
app = create_app(Config)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
<|reserved_special_token_1|>
from app import create_app
from app.config import Config
app = create_app(Config)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
|
flexible
|
{
"blob_id": "bea90bbcd4d34b64c21f022b6f3af2bee2d978e4",
"index": 1123,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n",
"step-3": "<mask token>\napp = create_app(Config)\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n",
"step-4": "from app import create_app\nfrom app.config import Config\napp = create_app(Config)\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n",
"step-5": "from app import create_app\nfrom app.config import Config\n\n\napp = create_app(Config)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000, debug=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.2.6 on 2020-06-18 14:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gestionadmin', '0133_auto_20200618_1339'),
]
operations = [
migrations.RemoveField(
model_name='comprasenc',
name='empleado',
),
]
|
normal
|
{
"blob_id": "f96a7bef48e7df2899343029a2fae9697125a5b2",
"index": 5203,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('gestionadmin', '0133_auto_20200618_1339')]\n operations = [migrations.RemoveField(model_name='comprasenc', name=\n 'empleado')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('gestionadmin', '0133_auto_20200618_1339')]\n operations = [migrations.RemoveField(model_name='comprasenc', name=\n 'empleado')]\n",
"step-5": "# Generated by Django 2.2.6 on 2020-06-18 14:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gestionadmin', '0133_auto_20200618_1339'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='comprasenc',\n name='empleado',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def save2txt(songname, lyric, path):
print('歌词下载完成:' + songname)
lyric_path = path + '\\' + songname + '.txt'
with open(lyric_path, 'a', encoding='utf-8') as f:
f.write(lyric)
<|reserved_special_token_0|>
def get_lyrics(songids):
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(
songids)
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile('\\[.*\\]')
lyric = re.sub(reg, '', initial_lyric).strip()
return lyric
<|reserved_special_token_0|>
def get_singer(url):
chrome_driver = 'D:\\software\\chromedriver_win32\\chromedriver.exe'
browser = webdriver.Chrome(executable_path=chrome_driver)
wait = WebDriverWait(browser, 5)
browser.get(url)
browser.switch_to.frame('g_iframe')
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.nm.nm-icn.f-thide.s-fc0')
singername = []
singerid = []
for snames in info:
name = snames.get_text()
songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1
].split("'")[0]
singername.append(name)
singerid.append(songid)
return zip(singername, singerid)
<|reserved_special_token_0|>
def lyrics_from_singername(name, path):
id = get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def save_song(songurl, path, songname):
try:
urlretrieve(songurl, path)
print('歌曲下载完成:' + songname)
except BaseException:
print('下载失败:' + songname)
pass
<|reserved_special_token_0|>
def lyrics_from_singerid(id, path):
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def http_get(api):
my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',
'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure':
False, 'expires': None, 'discard': True, 'comment': None,
'comment_url': None, 'rest': {}, 'rfc2109': False}
s = requests.Session()
s.headers.update({'Referer': 'http://music.163.com/'})
s.cookies.set(**my_cookie)
response = s.get(api)
json_data = json.loads(response.text)
return json_data
def download_single_mv(id):
size = '720'
api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'
json_data = http_get(api)
if json_data['code'] == 200:
a = list(json_data['data']['brs'].keys())
if size not in a:
size = a[0]
mvurl = json_data['data']['brs'][size]
artist = json_data['data']['artistName']
song = json_data['data']['name']
filename = '%s/[%s]%s.mp4' % (artist, size, song)
if os.path.exists(filename) == False:
if os.path.exists(artist) == False:
os.makedirs(artist)
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 100.0 / totalsize
s = '\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)
), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize:
sys.stderr.write('\n')
else:
sys.stderr.write('read %d\n' % (readsofar,))
print('downloading ' + filename)
urlretrieve(mvurl, filename, reporthook)
<|reserved_special_token_0|>
def _content_generator(music_id):
url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
headers = {'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
, 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':
'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':
'music.163.com', 'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1', 'Cookie':
'__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'
, 'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
limit = 20
offset = 0
compiler = re.compile(
'[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')
while True:
params = {'limit': limit, 'offset': offset}
offset += limit
r = requests.get(url, headers=headers, params=params)
comments = r.json()['comments']
has_more = r.json()['more']
for t in comments:
yield compiler.subn('', t['content'])[0]
if not has_more:
break
class WangYiMusicWordCloud:
stop_words = ['首歌']
def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
self.music_id = music_id
self.mask = mask
self.font_path = font_path
if not stop_words is None:
self.stop_words += stop_words
self.img_wordcloud = None
def _cut_word(self, comment):
word_pairs = posseg.lcut(comment, HMM=False)
result = []
for t in word_pairs:
if not (t.word in result or t.word in self.stop_words):
result.append(t.word)
return '/'.join(result)
def get_words_text(self):
if os.path.isfile(f'{self.music_id}.txt'):
print('评论文件已存在,读取文件...')
with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
return f.read()
else:
print('没有默认评论文件,开始爬取评论...')
count = 0
text = []
comments = _content_generator(self.music_id)
for t in comments:
text.append(self._cut_word(t))
count += 1
print(f'\r已爬取 {count}条评论', end='')
if count % 100 == 0:
print(f'\r已爬取 {count}条评论, 休息 2s', end='')
time.sleep(2)
str_text = '\n'.join(text)
with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
f.write(str_text)
print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
return str_text
def generate(self, **kwargs):
default_kwargs = {'background_color': 'white', 'width': 1000,
'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':
wordcloud.STOPWORDS}
if not self.mask is None:
default_kwargs['mask'] = np.array(Image.open(self.mask))
if not self.font_path is None:
default_kwargs['font_path'] = self.font_path
elif 'font_path' not in kwargs:
raise ValueError('缺少参数 font_path')
default_kwargs.update(kwargs)
str_text = self.get_words_text()
self.wordcloud = wordcloud.WordCloud(**default_kwargs)
self.img_wordcloud = self.wordcloud.generate(str_text)
def show_wordcloud(self):
if self.img_wordcloud is None:
self.generate()
plt.axis('off')
plt.imshow(self.img_wordcloud)
plt.show()
def to_file(self, filename):
if not hasattr(self, 'wordcloud'):
self.generate()
self.wordcloud.to_file(filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save2txt(songname, lyric, path):
print('歌词下载完成:' + songname)
lyric_path = path + '\\' + songname + '.txt'
with open(lyric_path, 'a', encoding='utf-8') as f:
f.write(lyric)
<|reserved_special_token_0|>
def get_lyrics(songids):
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(
songids)
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile('\\[.*\\]')
lyric = re.sub(reg, '', initial_lyric).strip()
return lyric
def lyrics_from_list(url, path):
new_url = url.replace('/#', '')
header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
}
res = requests.get(new_url, headers=header).text
r = BeautifulSoup(res, 'html.parser')
music_dict = {}
result = r.find('ul', {'class', 'f-hide'}).find_all('a')
for music in result:
print(music)
music_id = music.get('href').strip('/song?id=')
music_name = music.text
music_dict[music_id] = music_name
songids = music_dict.keys()
for i in songids:
lyric = get_lyrics(i)
save2txt(music_dict[i], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def get_singer(url):
chrome_driver = 'D:\\software\\chromedriver_win32\\chromedriver.exe'
browser = webdriver.Chrome(executable_path=chrome_driver)
wait = WebDriverWait(browser, 5)
browser.get(url)
browser.switch_to.frame('g_iframe')
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.nm.nm-icn.f-thide.s-fc0')
singername = []
singerid = []
for snames in info:
name = snames.get_text()
songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1
].split("'")[0]
singername.append(name)
singerid.append(songid)
return zip(singername, singerid)
<|reserved_special_token_0|>
def download_singer():
idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001,
6002, 6003, 7001, 7002, 7003]
for id in idlist:
url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'
.format(id))
save2csv(url)
<|reserved_special_token_0|>
def get_html(url):
proxy_addr = {'http': '61.135.217.7:80'}
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
try:
html = requests.get(url, headers=headers, proxies=proxy_addr).text
return html
except BaseException:
print('request error')
pass
<|reserved_special_token_0|>
def lyrics_from_singername(name, path):
id = get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def save_song(songurl, path, songname):
try:
urlretrieve(songurl, path)
print('歌曲下载完成:' + songname)
except BaseException:
print('下载失败:' + songname)
pass
<|reserved_special_token_0|>
def lyrics_from_singerid(id, path):
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def http_get(api):
my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',
'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure':
False, 'expires': None, 'discard': True, 'comment': None,
'comment_url': None, 'rest': {}, 'rfc2109': False}
s = requests.Session()
s.headers.update({'Referer': 'http://music.163.com/'})
s.cookies.set(**my_cookie)
response = s.get(api)
json_data = json.loads(response.text)
return json_data
def download_single_mv(id):
size = '720'
api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'
json_data = http_get(api)
if json_data['code'] == 200:
a = list(json_data['data']['brs'].keys())
if size not in a:
size = a[0]
mvurl = json_data['data']['brs'][size]
artist = json_data['data']['artistName']
song = json_data['data']['name']
filename = '%s/[%s]%s.mp4' % (artist, size, song)
if os.path.exists(filename) == False:
if os.path.exists(artist) == False:
os.makedirs(artist)
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 100.0 / totalsize
s = '\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)
), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize:
sys.stderr.write('\n')
else:
sys.stderr.write('read %d\n' % (readsofar,))
print('downloading ' + filename)
urlretrieve(mvurl, filename, reporthook)
<|reserved_special_token_0|>
def _content_generator(music_id):
url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
headers = {'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
, 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':
'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':
'music.163.com', 'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1', 'Cookie':
'__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'
, 'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
limit = 20
offset = 0
compiler = re.compile(
'[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')
while True:
params = {'limit': limit, 'offset': offset}
offset += limit
r = requests.get(url, headers=headers, params=params)
comments = r.json()['comments']
has_more = r.json()['more']
for t in comments:
yield compiler.subn('', t['content'])[0]
if not has_more:
break
class WangYiMusicWordCloud:
stop_words = ['首歌']
def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
self.music_id = music_id
self.mask = mask
self.font_path = font_path
if not stop_words is None:
self.stop_words += stop_words
self.img_wordcloud = None
def _cut_word(self, comment):
word_pairs = posseg.lcut(comment, HMM=False)
result = []
for t in word_pairs:
if not (t.word in result or t.word in self.stop_words):
result.append(t.word)
return '/'.join(result)
def get_words_text(self):
if os.path.isfile(f'{self.music_id}.txt'):
print('评论文件已存在,读取文件...')
with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
return f.read()
else:
print('没有默认评论文件,开始爬取评论...')
count = 0
text = []
comments = _content_generator(self.music_id)
for t in comments:
text.append(self._cut_word(t))
count += 1
print(f'\r已爬取 {count}条评论', end='')
if count % 100 == 0:
print(f'\r已爬取 {count}条评论, 休息 2s', end='')
time.sleep(2)
str_text = '\n'.join(text)
with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
f.write(str_text)
print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
return str_text
def generate(self, **kwargs):
default_kwargs = {'background_color': 'white', 'width': 1000,
'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':
wordcloud.STOPWORDS}
if not self.mask is None:
default_kwargs['mask'] = np.array(Image.open(self.mask))
if not self.font_path is None:
default_kwargs['font_path'] = self.font_path
elif 'font_path' not in kwargs:
raise ValueError('缺少参数 font_path')
default_kwargs.update(kwargs)
str_text = self.get_words_text()
self.wordcloud = wordcloud.WordCloud(**default_kwargs)
self.img_wordcloud = self.wordcloud.generate(str_text)
def show_wordcloud(self):
if self.img_wordcloud is None:
self.generate()
plt.axis('off')
plt.imshow(self.img_wordcloud)
plt.show()
def to_file(self, filename):
if not hasattr(self, 'wordcloud'):
self.generate()
self.wordcloud.to_file(filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save2txt(songname, lyric, path):
print('歌词下载完成:' + songname)
lyric_path = path + '\\' + songname + '.txt'
with open(lyric_path, 'a', encoding='utf-8') as f:
f.write(lyric)
<|reserved_special_token_0|>
def get_lyrics(songids):
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(
songids)
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile('\\[.*\\]')
lyric = re.sub(reg, '', initial_lyric).strip()
return lyric
def lyrics_from_list(url, path):
new_url = url.replace('/#', '')
header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
}
res = requests.get(new_url, headers=header).text
r = BeautifulSoup(res, 'html.parser')
music_dict = {}
result = r.find('ul', {'class', 'f-hide'}).find_all('a')
for music in result:
print(music)
music_id = music.get('href').strip('/song?id=')
music_name = music.text
music_dict[music_id] = music_name
songids = music_dict.keys()
for i in songids:
lyric = get_lyrics(i)
save2txt(music_dict[i], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def get_singer(url):
chrome_driver = 'D:\\software\\chromedriver_win32\\chromedriver.exe'
browser = webdriver.Chrome(executable_path=chrome_driver)
wait = WebDriverWait(browser, 5)
browser.get(url)
browser.switch_to.frame('g_iframe')
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.nm.nm-icn.f-thide.s-fc0')
singername = []
singerid = []
for snames in info:
name = snames.get_text()
songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1
].split("'")[0]
singername.append(name)
singerid.append(songid)
return zip(singername, singerid)
<|reserved_special_token_0|>
def download_singer():
idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001,
6002, 6003, 7001, 7002, 7003]
for id in idlist:
url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'
.format(id))
save2csv(url)
<|reserved_special_token_0|>
def get_html(url):
proxy_addr = {'http': '61.135.217.7:80'}
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
try:
html = requests.get(url, headers=headers, proxies=proxy_addr).text
return html
except BaseException:
print('request error')
pass
def get_top50(html):
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.f-hide #song-list-pre-cache a')
songname = []
songids = []
for sn in info:
songnames = sn.getText()
songname.append(songnames)
for si in info:
songid = str(re.findall('href="(.*?)"', str(si))).strip().split('=')[-1
].split("'")[0]
songids.append(songid)
return zip(songname, songids)
def lyrics_from_singername(name, path):
id = get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def save_song(songurl, path, songname):
try:
urlretrieve(songurl, path)
print('歌曲下载完成:' + songname)
except BaseException:
print('下载失败:' + songname)
pass
<|reserved_special_token_0|>
def lyrics_from_singerid(id, path):
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def http_get(api):
my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',
'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure':
False, 'expires': None, 'discard': True, 'comment': None,
'comment_url': None, 'rest': {}, 'rfc2109': False}
s = requests.Session()
s.headers.update({'Referer': 'http://music.163.com/'})
s.cookies.set(**my_cookie)
response = s.get(api)
json_data = json.loads(response.text)
return json_data
def download_single_mv(id):
size = '720'
api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'
json_data = http_get(api)
if json_data['code'] == 200:
a = list(json_data['data']['brs'].keys())
if size not in a:
size = a[0]
mvurl = json_data['data']['brs'][size]
artist = json_data['data']['artistName']
song = json_data['data']['name']
filename = '%s/[%s]%s.mp4' % (artist, size, song)
if os.path.exists(filename) == False:
if os.path.exists(artist) == False:
os.makedirs(artist)
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 100.0 / totalsize
s = '\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)
), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize:
sys.stderr.write('\n')
else:
sys.stderr.write('read %d\n' % (readsofar,))
print('downloading ' + filename)
urlretrieve(mvurl, filename, reporthook)
<|reserved_special_token_0|>
def _content_generator(music_id):
url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
headers = {'Accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
, 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':
'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':
'music.163.com', 'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1', 'Cookie':
'__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'
, 'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
limit = 20
offset = 0
compiler = re.compile(
'[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')
while True:
params = {'limit': limit, 'offset': offset}
offset += limit
r = requests.get(url, headers=headers, params=params)
comments = r.json()['comments']
has_more = r.json()['more']
for t in comments:
yield compiler.subn('', t['content'])[0]
if not has_more:
break
class WangYiMusicWordCloud:
stop_words = ['首歌']
def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
self.music_id = music_id
self.mask = mask
self.font_path = font_path
if not stop_words is None:
self.stop_words += stop_words
self.img_wordcloud = None
def _cut_word(self, comment):
word_pairs = posseg.lcut(comment, HMM=False)
result = []
for t in word_pairs:
if not (t.word in result or t.word in self.stop_words):
result.append(t.word)
return '/'.join(result)
def get_words_text(self):
if os.path.isfile(f'{self.music_id}.txt'):
print('评论文件已存在,读取文件...')
with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
return f.read()
else:
print('没有默认评论文件,开始爬取评论...')
count = 0
text = []
comments = _content_generator(self.music_id)
for t in comments:
text.append(self._cut_word(t))
count += 1
print(f'\r已爬取 {count}条评论', end='')
if count % 100 == 0:
print(f'\r已爬取 {count}条评论, 休息 2s', end='')
time.sleep(2)
str_text = '\n'.join(text)
with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
f.write(str_text)
print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
return str_text
def generate(self, **kwargs):
default_kwargs = {'background_color': 'white', 'width': 1000,
'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':
wordcloud.STOPWORDS}
if not self.mask is None:
default_kwargs['mask'] = np.array(Image.open(self.mask))
if not self.font_path is None:
default_kwargs['font_path'] = self.font_path
elif 'font_path' not in kwargs:
raise ValueError('缺少参数 font_path')
default_kwargs.update(kwargs)
str_text = self.get_words_text()
self.wordcloud = wordcloud.WordCloud(**default_kwargs)
self.img_wordcloud = self.wordcloud.generate(str_text)
def show_wordcloud(self):
if self.img_wordcloud is None:
self.generate()
plt.axis('off')
plt.imshow(self.img_wordcloud)
plt.show()
def to_file(self, filename):
if not hasattr(self, 'wordcloud'):
self.generate()
self.wordcloud.to_file(filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save2txt(songname, lyric, path):
print('歌词下载完成:' + songname)
lyric_path = path + '\\' + songname + '.txt'
with open(lyric_path, 'a', encoding='utf-8') as f:
f.write(lyric)
def single_song_lyric(song_id, path, song_name):
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(
song_id)
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile('\\[.*\\]')
lyric = re.sub(reg, '', initial_lyric).strip()
save2txt(song_name, lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def get_lyrics(songids):
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(
songids)
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile('\\[.*\\]')
lyric = re.sub(reg, '', initial_lyric).strip()
return lyric
def lyrics_from_list(url, path):
new_url = url.replace('/#', '')
header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
}
res = requests.get(new_url, headers=header).text
r = BeautifulSoup(res, 'html.parser')
music_dict = {}
result = r.find('ul', {'class', 'f-hide'}).find_all('a')
for music in result:
print(music)
music_id = music.get('href').strip('/song?id=')
music_name = music.text
music_dict[music_id] = music_name
songids = music_dict.keys()
for i in songids:
lyric = get_lyrics(i)
save2txt(music_dict[i], lyric, path)
time.sleep(1)
<|reserved_special_token_0|>
def get_singer(url):
chrome_driver = 'D:\\software\\chromedriver_win32\\chromedriver.exe'
browser = webdriver.Chrome(executable_path=chrome_driver)
wait = WebDriverWait(browser, 5)
browser.get(url)
browser.switch_to.frame('g_iframe')
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.nm.nm-icn.f-thide.s-fc0')
singername = []
singerid = []
for snames in info:
name = snames.get_text()
songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1
].split("'")[0]
singername.append(name)
singerid.append(songid)
return zip(singername, singerid)
def get_data(url):
data = []
for singernames, singerids in get_singer(url):
info = {}
info['歌手名字'] = singernames
info['歌手ID'] = singerids
data.append(info)
return data
<|reserved_special_token_0|>
def download_singer():
idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001,
6002, 6003, 7001, 7002, 7003]
for id in idlist:
url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'
.format(id))
save2csv(url)
<|reserved_special_token_0|>
def get_html(url):
proxy_addr = {'http': '61.135.217.7:80'}
headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
try:
html = requests.get(url, headers=headers, proxies=proxy_addr).text
return html
except BaseException:
print('request error')
pass
def get_top50(html):
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.f-hide #song-list-pre-cache a')
songname = []
songids = []
for sn in info:
songnames = sn.getText()
songname.append(songnames)
for si in info:
songid = str(re.findall('href="(.*?)"', str(si))).strip().split('=')[-1
].split("'")[0]
songids.append(songid)
return zip(songname, songids)
def lyrics_from_singername(name, path):
id = get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def save_song(songurl, path, songname):
try:
urlretrieve(songurl, path)
print('歌曲下载完成:' + songname)
except BaseException:
print('下载失败:' + songname)
pass
def songs_from_singername(name, path):
    """Download the mp3 audio of the singer's top-50 songs into directory *path*."""
    id = get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        songid = singer_info[1]
        # the outer/url endpoint redirects to the raw audio stream
        songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(
            songid)
        songname = singer_info[0]
        down_path = path + '\\' + songname + '.mp3'  # Windows-style path join
        save_song(songurl, down_path, songname)
        time.sleep(1)  # throttle requests
def lyrics_from_singerid(id, path):
    """Save each top-50 lyric of singer *id* as a .txt file under *path*."""
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        lyric = get_lyrics(singer_info[1])
        save2txt(singer_info[0], lyric, path)
        time.sleep(1)  # throttle requests
<|reserved_special_token_0|>
def http_get(api):
    """GET *api* with the Referer header and 'appver' cookie the NetEase API
    expects, and return the parsed JSON payload."""
    # cookie fields follow requests' cookies.set(**kwargs) signature
    my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',
        'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure':
        False, 'expires': None, 'discard': True, 'comment': None,
        'comment_url': None, 'rest': {}, 'rfc2109': False}
    s = requests.Session()
    s.headers.update({'Referer': 'http://music.163.com/'})
    s.cookies.set(**my_cookie)
    response = s.get(api)
    json_data = json.loads(response.text)
    return json_data
def download_single_mv(id):
    """Download the MV for mv-id *id* as '<artist>/[<size>]<name>.mp4',
    preferring 720p and falling back to the first available quality."""
    size = '720'
    api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'
    json_data = http_get(api)
    if json_data['code'] != 200:
        return  # unknown / non-MV id: nothing to download
    brs = json_data['data']['brs']  # quality -> direct mp4 url
    available = list(brs.keys())
    if size not in available:
        size = available[0]
    mvurl = brs[size]
    artist = json_data['data']['artistName']
    song = json_data['data']['name']
    filename = '%s/[%s]%s.mp4' % (artist, size, song)
    # BUG FIX: this copy downloaded unconditionally; the download now only
    # runs when the target file does not already exist (matching the
    # duplicate implementation elsewhere in this file).
    if not os.path.exists(filename):
        os.makedirs(artist, exist_ok=True)

        def reporthook(blocknum, blocksize, totalsize):
            # urlretrieve progress callback: percent meter on stderr
            readsofar = blocknum * blocksize
            if totalsize > 0:
                percent = readsofar * 100.0 / totalsize
                s = '\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)),
                    readsofar, totalsize)
                sys.stderr.write(s)
                if readsofar >= totalsize:
                    sys.stderr.write('\n')
            else:
                sys.stderr.write('read %d\n' % (readsofar,))

        print('downloading ' + filename)
        urlretrieve(mvurl, filename, reporthook)
<|reserved_special_token_0|>
def _content_generator(music_id):
    """Yield cleaned comment texts for song *music_id*, paging through the
    comments API 20 at a time until the API reports no more pages."""
    url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
    # captured browser session headers — the hard-coded Cookie will eventually
    # expire; refresh it from a real browser session if requests start failing
    headers = {'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
        , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':
        'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':
        'music.163.com', 'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1', 'Cookie':
        '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'
        , 'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
        }
    limit = 20
    offset = 0
    # negated class: strip every char outside the CJK, CJK-punctuation and
    # full-width ranges, digits and ASCII letters (inner '^' are literal)
    compiler = re.compile(
        '[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')
    while True:
        params = {'limit': limit, 'offset': offset}
        offset += limit
        r = requests.get(url, headers=headers, params=params)
        comments = r.json()['comments']
        has_more = r.json()['more']  # API flag: more pages remain
        for t in comments:
            yield compiler.subn('', t['content'])[0]
        if not has_more:
            break
class WangYiMusicWordCloud:
    """Scrape the comments of a NetEase song and render them as a word cloud."""
    # default words excluded from the cloud (class-wide default)
    stop_words = ['首歌']

    def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
        self.music_id = music_id  # song whose comments are analysed
        self.mask = mask  # optional image path shaping the cloud
        self.font_path = font_path  # font file (needed for CJK glyphs)
        if stop_words is not None:
            # BUG FIX: the original `self.stop_words += stop_words` extended
            # the *class* list in place, leaking stop words across instances.
            self.stop_words = self.stop_words + list(stop_words)
        self.img_wordcloud = None  # set by generate()

    def _cut_word(self, comment):
        """Segment *comment* with jieba; join unique non-stop words with '/'."""
        word_pairs = posseg.lcut(comment, HMM=False)
        result = []
        for pair in word_pairs:
            if not (pair.word in result or pair.word in self.stop_words):
                result.append(pair.word)
        return '/'.join(result)

    def get_words_text(self):
        """Return segmented comment text, scraping (and caching to
        '<music_id>.txt') on first use."""
        if os.path.isfile(f'{self.music_id}.txt'):
            print('评论文件已存在,读取文件...')
            with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
                return f.read()
        print('没有默认评论文件,开始爬取评论...')
        count = 0
        text = []
        for comment in _content_generator(self.music_id):
            text.append(self._cut_word(comment))
            count += 1
            print(f'\r已爬取 {count}条评论', end='')
            if count % 100 == 0:
                print(f'\r已爬取 {count}条评论, 休息 2s', end='')
                time.sleep(2)  # back off so the comment API does not block us
        str_text = '\n'.join(text)
        with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
            f.write(str_text)
        print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
        return str_text

    def generate(self, **kwargs):
        """Build the word cloud; *kwargs* override the defaults below.

        Raises ValueError when no font_path is available from either the
        constructor or *kwargs*.
        """
        default_kwargs = {'background_color': 'white', 'width': 1000,
            'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':
            wordcloud.STOPWORDS}
        if self.mask is not None:
            default_kwargs['mask'] = np.array(Image.open(self.mask))
        if self.font_path is not None:
            default_kwargs['font_path'] = self.font_path
        elif 'font_path' not in kwargs:
            raise ValueError('缺少参数 font_path')
        default_kwargs.update(kwargs)
        str_text = self.get_words_text()
        self.wordcloud = wordcloud.WordCloud(**default_kwargs)
        self.img_wordcloud = self.wordcloud.generate(str_text)

    def show_wordcloud(self):
        """Display the cloud with matplotlib, generating it first if needed."""
        if self.img_wordcloud is None:
            self.generate()
        plt.axis('off')
        plt.imshow(self.img_wordcloud)
        plt.show()

    def to_file(self, filename):
        """Save the rendered cloud to *filename*, generating it first if needed."""
        if not hasattr(self, 'wordcloud'):
            self.generate()
        self.wordcloud.to_file(filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import json
import time
#功能一:下载单一歌曲、歌词
def single_song(song_id, path, song_name):
    """Download one song by id into *path* as '<song_name>.mp3'."""
    target = '{}\\{}.mp3'.format(path, song_name)
    source = "http://music.163.com/song/media/outer/url?id=%s" % song_id
    urlretrieve(source, target)
    print("歌曲下载完成:" + song_name)
def save2txt(songname, lyric, path):
    """Append *lyric* to '<path>\\<songname>.txt' (UTF-8)."""
    print("歌词下载完成:" + songname)
    target = '{}\\{}.txt'.format(path, songname)
    with open(target, 'a', encoding='utf-8') as fh:
        fh.write(lyric)
def single_song_lyric(song_id,path,song_name): # download one song's lyric: song id, save dir, song name
    """Fetch the LRC lyric for *song_id*, strip '[..]' timestamp tags, and
    save it as '<path>\\<song_name>.txt'."""
    url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    html = requests.get(url, headers=headers).text
    json_obj = json.loads(html)
    initial_lyric = json_obj['lrc']['lyric']
    reg = re.compile(r'\[.*\]')  # LRC bracketed tags, greedy within a line
    lyric = re.sub(reg, '', initial_lyric).strip()
    save2txt(song_name, lyric, path)
    time.sleep(1)  # throttle requests
#功能二:根据歌单url下载
def songs_from_list(url, path):
    """Download every song of the playlist/chart/artist page *url* into
    directory *path* (one mp3 per track)."""
    new_url = url.replace('/#', '')  # drop the SPA fragment so requests gets real HTML
    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }
    res = requests.get(new_url, headers=header).text
    soup = BeautifulSoup(res, "html.parser")
    music_dict = {}
    for music in soup.find('ul', {'class', 'f-hide'}).find_all('a'):
        # BUG FIX: the original used .strip('/song?id='), which strips a *set
        # of characters* from both ends (not a prefix) and can corrupt hrefs;
        # split on 'id=' instead.
        music_id = music.get('href').split('id=')[-1]
        music_dict[music_id] = music.text
    for song_id, song_name in music_dict.items():
        song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
        down_path = path + '\\' + song_name + '.mp3'  # Windows-style join
        print("正在下载:%s" % song_name)
        urlretrieve(song_url, down_path)
def get_lyrics(songids):
    """Fetch and return the plain-text lyric for song id *songids*,
    with all bracketed LRC timestamp tags removed."""
    api = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    payload = json.loads(requests.get(api, headers=headers).text)
    raw_lyric = payload['lrc']['lyric']
    return re.compile(r'\[.*\]').sub('', raw_lyric).strip()
def lyrics_from_list(url, path):
    """Download the lyric of every song in the playlist at *url* into *path*."""
    new_url = url.replace('/#', '')  # drop the SPA fragment
    header = {
        'Host': 'music.163.com',
        'Referer': 'https://music.163.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
    }
    res = requests.get(new_url, headers=header).text
    soup = BeautifulSoup(res, "html.parser")
    music_dict = {}
    for music in soup.find('ul', {'class', 'f-hide'}).find_all('a'):
        # BUG FIX: .strip('/song?id=') strips a character *set*, not a prefix;
        # split on 'id=' to extract the id reliably.
        music_dict[music.get('href').split('id=')[-1]] = music.text
    for song_id, song_name in music_dict.items():
        lyric = get_lyrics(song_id)
        save2txt(song_name, lyric, path)
        time.sleep(1)  # throttle requests
#功能三:根据歌手下载
#获取歌手信息和id
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import csv
import re
# chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe" #chromedriver的文件位置
# browser = webdriver.Chrome(executable_path = chrome_driver)
# wait = WebDriverWait(browser, 5) # 设置等待时间
def get_singer(url):
    """Open the artist-category page *url* in ChromeDriver and return zipped
    (singer name, singer id) pairs scraped from it."""
    chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe"  # chromedriver location
    browser = webdriver.Chrome(executable_path=chrome_driver)
    wait = WebDriverWait(browser, 5)  # wait budget for the page
    try:
        browser.get(url)
        browser.switch_to.frame('g_iframe')  # real content lives in an iframe
        html = browser.page_source
    finally:
        # BUG FIX: the original never closed the browser, leaking one Chrome
        # process per call (download_singer calls this 15 times).
        browser.quit()
    soup = BeautifulSoup(html, 'lxml')
    info = soup.select('.nm.nm-icn.f-thide.s-fc0')
    singername = []
    singerid = []
    for snames in info:
        name = snames.get_text()
        # pull the numeric id out of the anchor's href
        songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1].split('\'')[0]
        singername.append(name)
        singerid.append(songid)
    return zip(singername, singerid)
def get_data(url):
    """Return a list of {'歌手名字': name, '歌手ID': id} dicts for page *url*."""
    return [
        {'歌手名字': name, '歌手ID': sid}
        for name, sid in get_singer(url)
    ]
def save2csv(url):
    """Append the singers found at *url* to singer.csv (UTF-8 with BOM for Excel)."""
    print('保存歌手信息中...请稍后查看')
    # BUG FIX: the original wrote the header row on *every* call, so repeated
    # appends (download_singer loops over 15 category pages) interleaved
    # headers with data. Write the header only when the file is new/empty.
    need_header = (not os.path.exists('singer.csv')
                   or os.path.getsize('singer.csv') == 0)
    with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:
        fieldnames = ['歌手名字', '歌手ID']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if need_header:
            writer.writeheader()
        data = get_data(url)
        print(data)
        writer.writerows(data)
    print('保存成功')
def download_singer():
    """Scrape all artist category pages and save every singer to singer.csv."""
    # artist category ids on music.163.com — presumably language/type
    # groupings; TODO confirm against the site's catalogue
    idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]
    for id in idlist:
        # initial=-1 selects the "all initials" tab of each category
        url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id)
        save2csv(url)
def get_id(singer_name):
    """Look up a singer's id by exact name in lib\\singer_info.csv.

    Raises ValueError when the name is not present (via list.index).
    """
    csv_path = "lib\\singer_info.csv"
    names, ids = [], []
    with open(csv_path, 'r', encoding='utf-8-sig') as fh:
        for row in csv.reader(fh):
            names.append(row[0])
            ids.append(row[1])
    return ids[names.index(singer_name)]
#根据歌手姓名下载
def get_html(url):
    """Fetch *url* through a public HTTP proxy and return the body text.

    The proxy ip (from xicidaili.com) may expire — swap it when requests
    start failing. Returns None on failure.
    """
    proxy_addr = {'http': '61.135.217.7:80'}
    headers = {
        'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
    try:
        return requests.get(url, headers=headers, proxies=proxy_addr).text
    except requests.RequestException:
        # BUG FIX: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        print('request error')
        return None
def get_top50(html):
    """Parse an artist page and return zipped (song name, song id) pairs
    from the pre-cached top-50 list. The re lookup needs str input."""
    soup = BeautifulSoup(html, 'lxml')
    anchors = soup.select('.f-hide #song-list-pre-cache a')
    names = [anchor.getText() for anchor in anchors]
    ids = [
        str(re.findall('href="(.*?)"', str(anchor))).strip().split('=')[-1].split('\'')[0]
        for anchor in anchors
    ]
    return zip(names, ids)
def lyrics_from_singername(name,path): # download the lyrics of the singer's 50 hottest songs, by singer name
    """Resolve *name* to a singer id, then save each top-50 lyric under *path*."""
    id=get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        lyric = get_lyrics(singer_info[1])
        save2txt(singer_info[0], lyric, path)
        time.sleep(1)  # throttle requests
def save_song(songurl, path, songname):
    """Download *songurl* to *path*; log success/failure by *songname*.

    Best-effort: download errors are reported, never raised.
    """
    try:
        urlretrieve(songurl, path)
        print('歌曲下载完成:' + songname)
    except Exception:
        # BUG FIX: was `except BaseException`, which also trapped
        # KeyboardInterrupt/SystemExit; only download errors should be swallowed.
        print('下载失败:' + songname)
def songs_from_singername(name,path): # download the singer's songs to *path*, by singer name
    """Download mp3 audio for each of the singer's top-50 songs into *path*."""
    id=get_id(name)
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        songid = singer_info[1]
        # the outer/url endpoint redirects to the raw audio stream
        songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
        songname = singer_info[0]
        down_path=path+'\\'+songname+'.mp3'  # Windows-style join
        save_song(songurl, down_path,songname)
        time.sleep(1)  # throttle requests
def lyrics_from_singerid(id,path): # download lyrics by singer id into *path*
    """Save each top-50 lyric of singer *id* as a .txt file under *path*."""
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        lyric = get_lyrics(singer_info[1])
        save2txt(singer_info[0], lyric, path)
        time.sleep(1)  # throttle requests
def songs_from_singerid(id,path): # download the singer's audio by singer id into *path*
    """Download mp3 audio for each top-50 song of singer *id* into *path*."""
    top50url = 'https://music.163.com/artist?id={}'.format(id)
    html = get_html(top50url)
    singer_infos = get_top50(html)  # (song name, song id) pairs
    for singer_info in singer_infos:
        songid = singer_info[1]
        # the outer/url endpoint redirects to the raw audio stream
        songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
        songname = singer_info[0]
        down_path = path + '\\' + songname + '.mp3'  # Windows-style join
        save_song(songurl, down_path, songname)
        time.sleep(1)  # throttle requests
#功能四:下载mv
import requests
import os
import sys
from urllib.parse import urlparse,parse_qs
def http_get(api):
    """GET *api* the way the NetEase client does (Referer header plus an
    'appver' cookie) and return the decoded JSON response."""
    appver_cookie = dict(
        version=0,
        name='appver',
        value='1.5.0.75771',
        port=None,
        domain='www.mydomain.com',
        path='/',
        secure=False,
        expires=None,
        discard=True,
        comment=None,
        comment_url=None,
        rest={},
        rfc2109=False,
    )
    session = requests.Session()
    session.headers.update({'Referer': "http://music.163.com/"})
    session.cookies.set(**appver_cookie)
    return json.loads(session.get(api).text)
def download_single_mv(id):
    """Download the MV with mv-id *id* as '<artist>/[<size>]<name>.mp4',
    preferring 720p and falling back to the first available quality."""
    size = "720"
    api = "http://music.163.com/api/mv/detail?id=" + str(id) + "&type=mp4"
    json_data = http_get(api)
    if json_data["code"] != 200:
        return  # not a valid mv id
    available = list(json_data["data"]["brs"].keys())
    if size not in available:
        size = available[0]  # no 720p: pick the first offered quality
    mvurl = json_data["data"]["brs"][size]  # direct mp4 url
    artist = json_data["data"]["artistName"]
    song = json_data["data"]["name"]
    filename = '%s/[%s]%s.mp4' % (artist, size, song)
    # idiom fix: was `os.path.exists(...) == False` twice; makedirs with
    # exist_ok replaces the nested directory check
    if not os.path.exists(filename):
        os.makedirs(artist, exist_ok=True)

        def reporthook(blocknum, blocksize, totalsize):
            # urlretrieve progress callback: percent meter on stderr
            readsofar = blocknum * blocksize
            if totalsize > 0:
                percent = readsofar * 1e2 / totalsize
                s = "\r%5.1f%% %*d / %d" % (
                    percent, len(str(totalsize)), readsofar, totalsize)
                sys.stderr.write(s)
                if readsofar >= totalsize:  # near the end
                    sys.stderr.write("\n")
            else:  # total size is unknown
                sys.stderr.write("read %d\n" % (readsofar,))

        print("downloading " + filename)
        urlretrieve(mvurl, filename, reporthook)
def download_mv_from_list(url):
    """Batch-download MVs: *url* may be a playlist page, an album page, or a
    single-MV page (anything else with an 'id' query parameter)."""
    clean = url.replace("#", "")  # drop the SPA fragment marker
    id = parse_qs(urlparse(clean).query)["id"][0]
    if "playlist" in clean:
        playlist_api = "http://music.163.com/api/playlist/detail?id=%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["result"]["tracks"]):
            download_single_mv(mv["mvid"])
            print("downloaded:" + str(idx))
    elif "album" in clean:
        playlist_api = "http://music.163.com/api/album/%s" % (id)
        json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["album"]["songs"]):
            # BUG FIX: was `mv["mvid"] != None`; identity comparison is the
            # correct test against None (mvid 0 means "no MV")
            if mv["mvid"] is not None and mv["mvid"] != 0:
                download_single_mv(mv["mvid"])
                print("downloaded:" + str(idx))
    else:
        # BUG FIX: the original called download_single_mv(id) unconditionally,
        # issuing a doomed mv-detail request with a playlist/album id after
        # the branches above; only treat *id* as an mv id when it is neither.
        download_single_mv(id)
#功能五:爬取歌曲评论并生成词云图
from jieba import posseg
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import wordcloud
def _content_generator(music_id): # yield the comments of the given song id
    """Page through the song-comment API (20 per request) and yield each
    comment's text with all chars outside the CJK/full-width/alnum ranges
    stripped."""
    url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
    # captured browser session headers — the hard-coded Cookie will expire
    # eventually; refresh it from a real session if requests start failing
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Host': 'music.163.com',
        'Proxy-Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    }
    limit = 20
    offset = 0
    # negated class: strip everything outside CJK, CJK punctuation,
    # full-width forms, digits and ASCII letters (inner '^' are literal)
    compiler = re.compile(r'[^\u4E00-\u9FA5^\u3000-\u303F^\uFF00-\uFFEF^0-9^a-z^A-Z]')
    while True:
        params = {
            'limit': limit,
            'offset': offset,
        }
        offset += limit
        r = requests.get(url, headers=headers, params=params)
        comments = r.json()['comments']
        has_more = r.json()['more']  # API flag: more pages remain
        for t in comments:
            yield compiler.subn('', t['content'])[0]
        if not has_more:
            break
class WangYiMusicWordCloud:
    """Scrape the comments of a NetEase song and render them as a word cloud."""
    # default words excluded from the cloud (class-wide default)
    stop_words = ['首歌']

    def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
        self.music_id = music_id  # song whose comments are analysed
        self.mask = mask  # optional background image shaping the cloud
        self.font_path = font_path  # font file (needed for CJK glyphs)
        if stop_words is not None:
            # BUG FIX: the original `self.stop_words += stop_words` extended
            # the *class* list in place, leaking stop words across instances.
            self.stop_words = self.stop_words + list(stop_words)
        self.img_wordcloud = None  # set by generate()

    def _cut_word(self, comment):  # word segmentation
        """Segment *comment* with jieba; join unique non-stop words with '/'."""
        word_pairs = posseg.lcut(comment, HMM=False)
        result = []
        for pair in word_pairs:
            if not (pair.word in result or pair.word in self.stop_words):
                result.append(pair.word)
        return '/'.join(result)

    def get_words_text(self):
        """Return the segmented comment text, reading the cached
        '<music_id>.txt' when present, otherwise scraping and writing it."""
        if os.path.isfile(f'{self.music_id}.txt'):
            print('评论文件已存在,读取文件...')
            with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
                return f.read()
        print('没有默认评论文件,开始爬取评论...')
        count = 0
        text = []
        for comment in _content_generator(self.music_id):
            text.append(self._cut_word(comment))
            count += 1
            print(f'\r已爬取 {count}条评论', end='')
            if count % 100 == 0:
                print(f'\r已爬取 {count}条评论, 休息 2s', end='')
                time.sleep(2)  # back off so the comment API does not block us
        str_text = '\n'.join(text)
        with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
            f.write(str_text)
        print(f'\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')
        return str_text

    def generate(self, **kwargs):
        """Build the word cloud; *kwargs* override the defaults below.

        Raises ValueError when no font_path is available from either the
        constructor or *kwargs*.
        """
        default_kwargs = {
            'background_color': "white",
            'width': 1000,
            'height': 860,
            'margin': 2,
            'max_words': 50,
            'stopwords': wordcloud.STOPWORDS,
        }
        if self.mask is not None:
            default_kwargs['mask'] = np.array(Image.open(self.mask))
        if self.font_path is not None:
            default_kwargs['font_path'] = self.font_path
        elif 'font_path' not in kwargs:
            raise ValueError('缺少参数 font_path')
        default_kwargs.update(kwargs)
        str_text = self.get_words_text()
        self.wordcloud = wordcloud.WordCloud(**default_kwargs)
        self.img_wordcloud = self.wordcloud.generate(str_text)

    def show_wordcloud(self):
        """Display the cloud with matplotlib, generating it first if needed."""
        if self.img_wordcloud is None:
            self.generate()
        plt.axis('off')
        plt.imshow(self.img_wordcloud)
        plt.show()

    def to_file(self, filename):
        """Save the rendered cloud to *filename*, generating it first if needed."""
        if not hasattr(self, 'wordcloud'):
            self.generate()
        self.wordcloud.to_file(filename)
def get_wordcloud(music_id, mask, font, path):
    """Entry point: build, display, and save a word cloud for one song.

    :param music_id: NetEase song id
    :param mask: background/mask image path (or None)
    :param font: font file path used for CJK rendering
    :param path: output directory; the image is written as ``result.jpg``
    """
    wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)
    wordcloud_obj.show_wordcloud()
    # os.path.join instead of a hard-coded '\\' separator so the output
    # path is portable beyond Windows.
    result = os.path.join(path, 'result.jpg')
    wordcloud_obj.to_file(result)
|
flexible
|
{
"blob_id": "3b11d514b15775e4c818a7a2adf9a80e89dca968",
"index": 5801,
"step-1": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = 
get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 
'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n 
self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', 
str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n 
s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; 
WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with 
open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', 
str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\ndef get_top50(html):\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.f-hide #song-list-pre-cache a')\n songname = []\n songids = []\n for sn in info:\n songnames = sn.getText()\n songname.append(songnames)\n for si in info:\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1\n ].split(\"'\")[0]\n songids.append(songid)\n return zip(songname, songids)\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef 
http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; 
_ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n 
for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\ndef single_song_lyric(song_id, path, song_name):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n song_id)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n save2txt(song_name, lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n 
time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\ndef get_data(url):\n data = []\n for singernames, singerids in get_singer(url):\n info = {}\n info['歌手名字'] = singernames\n info['歌手ID'] = singerids\n data.append(info)\n return data\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\ndef get_top50(html):\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.f-hide #song-list-pre-cache a')\n songname = []\n songids = []\n for sn in info:\n songnames = sn.getText()\n songname.append(songnames)\n for si in info:\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1\n ].split(\"'\")[0]\n songids.append(songid)\n return zip(songname, songids)\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 
'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\ndef songs_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n songid = singer_info[1]\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(\n songid)\n songname = singer_info[0]\n down_path = path + '\\\\' + songname + '.mp3'\n save_song(songurl, down_path, songname)\n time.sleep(1)\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n 
song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; 
JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def 
generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-5": "import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlretrieve\r\nimport json\r\nimport time\r\n\r\n#功能一:下载单一歌曲、歌词\r\n\r\ndef single_song(song_id,path,song_name): #下载单一歌曲,输入为歌曲id,保存路径,歌曲名称\r\n song_url = \"http://music.163.com/song/media/outer/url?id=%s\" % song_id\r\n down_path = path +'\\\\'+ song_name + '.mp3'\r\n urlretrieve(song_url,down_path)\r\n print(\"歌曲下载完成:\"+song_name)\r\n\r\ndef save2txt(songname, lyric,path): #写进歌词到指定路径,并保存,输入为歌曲名称、歌词信息、保存路径\r\n # print('正在保存歌曲:{}'.format(songname))\r\n print(\"歌词下载完成:\"+songname)\r\n lyric_path=path+'\\\\'+songname+'.txt'\r\n with open(lyric_path, 'a', encoding='utf-8')as f:\r\n f.write(lyric)\r\n\r\ndef single_song_lyric(song_id,path,song_name): #下载单一歌曲的歌词,输入为歌曲id,保存路径,歌曲名称\r\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n html = requests.get(url, headers=headers).text\r\n json_obj = json.loads(html)\r\n initial_lyric = json_obj['lrc']['lyric']\r\n reg = re.compile(r'\\[.*\\]')\r\n lyric = re.sub(reg, '', initial_lyric).strip()\r\n save2txt(song_name, lyric, path)\r\n time.sleep(1)\r\n\r\n\r\n#功能二:根据歌单url下载\r\n\r\ndef songs_from_list(url,path): #url:歌单网址;path:本地保存目录 下载某一歌单的所有歌曲(包括歌手页、排行榜)\r\n new_url = url.replace('/#', '')\r\n\r\n header = {\r\n 'Host': 'music.163.com',\r\n 'Referer': 'https://music.163.com/',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\r\n }\r\n\r\n res = requests.get(new_url, headers=header).text\r\n\r\n r = BeautifulSoup(res, \"html.parser\")\r\n music_dict = {}\r\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\r\n for music in result:\r\n print(music)\r\n music_id = music.get('href').strip('/song?id=')\r\n music_name = music.text\r\n music_dict[music_id] 
= music_name\r\n for song_id in music_dict:\r\n song_url = \"http://music.163.com/song/media/outer/url?id=%s\" % song_id\r\n down_path=path+'\\\\'+music_dict[song_id]+'.mp3'\r\n\r\n # path = \"C:\\\\Users\\\\ming-\\\\Downloads\\\\%s.mp3\" % music_dict[song_id]\r\n\r\n # 添加数据\r\n print( \"正在下载:%s\" % music_dict[song_id])\r\n # text.see(END)\r\n # text.update()\r\n\r\n urlretrieve(song_url, down_path)\r\n\r\ndef get_lyrics(songids): #根据歌曲id获取歌词,输入为歌曲Id\r\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n html = requests.get(url, headers=headers).text\r\n json_obj = json.loads(html)\r\n initial_lyric = json_obj['lrc']['lyric']\r\n reg = re.compile(r'\\[.*\\]')\r\n lyric = re.sub(reg, '', initial_lyric).strip()\r\n return lyric\r\n\r\ndef lyrics_from_list(url,path): #根据歌单下载歌曲歌词\r\n new_url = url.replace('/#', '')\r\n\r\n header = {\r\n 'Host': 'music.163.com',\r\n 'Referer': 'https://music.163.com/',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\r\n }\r\n\r\n res = requests.get(new_url, headers=header).text\r\n\r\n r = BeautifulSoup(res, \"html.parser\")\r\n music_dict = {}\r\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\r\n for music in result:\r\n print(music)\r\n music_id = music.get('href').strip('/song?id=')\r\n music_name = music.text\r\n music_dict[music_id] = music_name\r\n songids=music_dict.keys()\r\n for i in songids:\r\n lyric=get_lyrics(i)\r\n save2txt(music_dict[i],lyric,path)\r\n time.sleep(1)\r\n\r\n\r\n#功能三:根据歌手下载\r\n\r\n#获取歌手信息和id\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport csv\r\nimport re\r\n# chrome_driver = \"D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe\" #chromedriver的文件位置\r\n# 
browser = webdriver.Chrome(executable_path = chrome_driver)\r\n# wait = WebDriverWait(browser, 5) # 设置等待时间\r\ndef get_singer(url): # 返回歌手名字和歌手id,输入为歌手详情页\r\n chrome_driver = \"D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe\" # chromedriver的文件位置\r\n browser = webdriver.Chrome(executable_path=chrome_driver)\r\n wait = WebDriverWait(browser, 5) # 设置等待时间\r\n browser.get(url)\r\n browser.switch_to.frame('g_iframe')\r\n html = browser.page_source\r\n soup = BeautifulSoup(html, 'lxml')\r\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\r\n singername = []\r\n singerid = []\r\n for snames in info:\r\n name = snames.get_text()\r\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1].split('\\'')[0] #正则表达式获取歌曲id\r\n singername.append(name)\r\n singerid.append(songid)\r\n return zip(singername, singerid)\r\n\r\ndef get_data(url):\r\n data = []\r\n for singernames, singerids in get_singer(url):\r\n info = {}\r\n info['歌手名字'] = singernames\r\n info['歌手ID'] = singerids\r\n data.append(info)\r\n return data\r\n\r\ndef save2csv(url):\r\n print('保存歌手信息中...请稍后查看')\r\n with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:\r\n # CSV 基本写入用 w,追加改模式 w 为 a\r\n fieldnames = ['歌手名字', '歌手ID']\r\n writer = csv.DictWriter(f, fieldnames=fieldnames)\r\n writer.writeheader()\r\n data = get_data(url)\r\n print(data)\r\n writer.writerows(data)\r\n print('保存成功')\r\n\r\ndef download_singer():\r\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]\r\n for id in idlist:\r\n url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id)\r\n save2csv(url)\r\n\r\ndef get_id(singer_name): #根据歌手姓名获取对应的歌手id,输入为歌手姓名\r\n file = \"lib\\\\singer_info.csv\"\r\n with open(file, 'r',encoding='utf-8-sig') as f:\r\n reader = csv.reader(f)\r\n name = []\r\n id = []\r\n for i in reader:\r\n name.append(i[0])\r\n id.append(i[1])\r\n a=name.index(singer_name)\r\n return id[a]\r\n\r\n\r\n#根据歌手姓名下载\r\ndef 
get_html(url): #通过代理获取网页信息,输入为指定网页url\r\n proxy_addr = {'http': '61.135.217.7:80'}\r\n # 用的代理 ip,如果被封或者失效,在http://www.xicidaili.com/换一个\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n try:\r\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\r\n return html\r\n except BaseException:\r\n print('request error')\r\n pass\r\n\r\ndef get_top50(html): #获取热度前50名的歌曲,并返回对应的歌曲名称和歌曲id,输入为歌手详情页\r\n soup = BeautifulSoup(html, 'lxml')\r\n info = soup.select('.f-hide #song-list-pre-cache a')\r\n songname = []\r\n songids = []\r\n for sn in info:\r\n songnames = sn.getText()\r\n songname.append(songnames)\r\n for si in info:\r\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1].split('\\'')[0] # 用re查找,查找对象一定要是str类型\r\n songids.append(songid)\r\n return zip(songname, songids)\r\n\r\ndef lyrics_from_singername(name,path): #根据歌手姓名下载热度前50名歌曲的歌词\r\n id=get_id(name)\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n lyric = get_lyrics(singer_info[1])\r\n save2txt(singer_info[0], lyric, path)\r\n time.sleep(1)\r\n\r\ndef save_song(songurl, path,songname): #下载指定链接的歌曲,并保存到指定路径,输入为歌曲下载链接、保存路径、歌曲名称\r\n try:\r\n urlretrieve(songurl, path)\r\n print('歌曲下载完成:' + songname)\r\n except BaseException:\r\n print('下载失败:' + songname)\r\n pass\r\n\r\ndef songs_from_singername(name,path): #根据歌手姓名下载歌曲到指定路径,输入为歌手姓名和保存路径\r\n id=get_id(name)\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n songid = singer_info[1]\r\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)\r\n songname = singer_info[0]\r\n # path = 'D:\\\\code_new\\\\pycharm\\\\yunmusic\\\\song' + songname + '.mp3'\r\n 
down_path=path+'\\\\'+songname+'.mp3'\r\n save_song(songurl, down_path,songname)\r\n time.sleep(1)\r\n\r\ndef lyrics_from_singerid(id,path): #根据歌手id下载歌词,输入为歌手id和本地保存路径\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n lyric = get_lyrics(singer_info[1])\r\n save2txt(singer_info[0], lyric, path)\r\n time.sleep(1)\r\n\r\ndef songs_from_singerid(id,path): #根据歌手id下载歌曲音频,输入为歌手id和本地保存路径\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n songid = singer_info[1]\r\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)\r\n songname = singer_info[0]\r\n # path = 'D:\\\\code_new\\\\pycharm\\\\yunmusic\\\\song' + songname + '.mp3'\r\n down_path = path + '\\\\' + songname + '.mp3'\r\n save_song(songurl, down_path, songname)\r\n time.sleep(1)\r\n\r\n#功能四:下载mv\r\nimport requests\r\nimport os\r\nimport sys\r\nfrom urllib.parse import urlparse,parse_qs\r\n\r\ndef http_get(api):\r\n my_cookie = {\r\n \"version\":0,\r\n \"name\":'appver',\r\n \"value\":'1.5.0.75771',\r\n \"port\":None,\r\n # \"port_specified\":False,\r\n \"domain\":'www.mydomain.com',\r\n # \"domain_specified\":False,\r\n # \"domain_initial_dot\":False,\r\n \"path\":'/',\r\n # \"path_specified\":True,\r\n \"secure\":False,\r\n \"expires\":None,\r\n \"discard\":True,\r\n \"comment\":None,\r\n \"comment_url\":None,\r\n \"rest\":{},\r\n \"rfc2109\":False\r\n }\r\n\r\n s = requests.Session()\r\n s.headers.update({'Referer': \"http://music.163.com/\"})\r\n s.cookies.set(**my_cookie)\r\n response = s.get(api)\r\n json_data = json.loads(response.text)\r\n return json_data\r\n\r\ndef download_single_mv(id): #根据mvid下载\r\n size = \"720\" #default 720p\r\n api = \"http://music.163.com/api/mv/detail?id=\"+str(id)+\"&type=mp4\"\r\n json_data = http_get(api)\r\n if 
json_data[\"code\"]==200:\r\n a = list(json_data[\"data\"][\"brs\"].keys())\r\n if size not in a:\r\n size = a[0] #如果没有720p,则选择最小的版本\r\n mvurl = json_data[\"data\"][\"brs\"][size] #mv网址\r\n artist = json_data[\"data\"][\"artistName\"] #歌手信息\r\n song = json_data[\"data\"][\"name\"] #歌曲信息\r\n\r\n filename = '%s/[%s]%s.mp4' %(artist,size,song)\r\n\r\n if os.path.exists(filename)==False:\r\n if os.path.exists(artist)==False:\r\n os.makedirs(artist)\r\n def reporthook(blocknum, blocksize, totalsize):\r\n readsofar = blocknum * blocksize\r\n if totalsize > 0:\r\n percent = readsofar * 1e2 / totalsize\r\n s = \"\\r%5.1f%% %*d / %d\" % (\r\n percent, len(str(totalsize)), readsofar, totalsize)\r\n sys.stderr.write(s)\r\n if readsofar >= totalsize: # near the end\r\n sys.stderr.write(\"\\n\")\r\n else: # total size is unknown\r\n sys.stderr.write(\"read %d\\n\" % (readsofar,))\r\n print(\"downloading \"+filename)\r\n urlretrieve(mvurl,filename,reporthook)\r\n\r\ndef download_mv_from_list(url): #批量下载歌单的mv资源\r\n input=url.replace(\"#\",\"\")\r\n id = parse_qs(urlparse(input).query)[\"id\"][0]\r\n if \"playlist\" in input:\r\n playlist_api = \"http://music.163.com/api/playlist/detail?id=%s\" % (id)\r\n json_data = http_get(playlist_api)\r\n for idx, mv in enumerate(json_data[\"result\"][\"tracks\"]): #mv信息\r\n download_single_mv(mv[\"mvid\"])\r\n print(\"downloaded:\" + str(idx))\r\n elif \"album\" in input:\r\n playlist_api = \"http://music.163.com/api/album/%s\" % (id)\r\n json_data = http_get(playlist_api)\r\n for idx, mv in enumerate(json_data[\"album\"][\"songs\"]):\r\n if mv[\"mvid\"] != None and mv[\"mvid\"] != 0:\r\n download_single_mv(mv[\"mvid\"])\r\n print(\"downloaded:\" + str(idx))\r\n download_single_mv(id)\r\n\r\n\r\n#功能五:爬取歌曲评论并生成词云图\r\nfrom jieba import posseg\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport wordcloud\r\n\r\ndef _content_generator(music_id): #根据歌曲id获取评论信息\r\n url = 
'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\r\n headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9',\r\n 'Cache-Control': 'max-age=0',\r\n 'Host': 'music.163.com',\r\n 'Proxy-Connection': 'keep-alive',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',\r\n }\r\n limit = 20\r\n offset = 0\r\n compiler = re.compile(r'[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')\r\n\r\n while True:\r\n params = {\r\n 'limit': limit,\r\n 'offset': offset,\r\n }\r\n offset += limit\r\n r = requests.get(url, headers=headers, params=params)\r\n comments = 
r.json()['comments']\r\n has_more = r.json()['more']\r\n\r\n for t in comments:\r\n yield compiler.subn('', t['content'])[0]\r\n\r\n if not has_more:\r\n break\r\n\r\n\r\nclass WangYiMusicWordCloud: #自定义类,生成词云图\r\n stop_words = ['首歌']\r\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\r\n self.music_id = music_id #歌曲信息\r\n self.mask = mask #背景图片\r\n self.font_path = font_path #字体\r\n\r\n if not stop_words is None:\r\n self.stop_words+=stop_words\r\n\r\n self.img_wordcloud = None\r\n\r\n def _cut_word(self, comment): #分词\r\n word_pairs = posseg.lcut(comment, HMM=False)\r\n result = []\r\n for t in word_pairs:\r\n if not (t.word in result or t.word in self.stop_words):\r\n result.append(t.word)\r\n return '/'.join(result)\r\n\r\n\r\n def get_words_text(self): #若已有评论文件则读取,若没有则爬取评论并保存\r\n if os.path.isfile(f'{self.music_id}.txt'):\r\n print('评论文件已存在,读取文件...')\r\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\r\n return f.read()\r\n else:\r\n print('没有默认评论文件,开始爬取评论...')\r\n count = 0\r\n text = []\r\n comments = _content_generator(self.music_id)\r\n for t in comments:\r\n text.append(self._cut_word(t))\r\n\r\n count += 1\r\n print(f'\\r已爬取 {count}条评论', end='')\r\n if count % 100 == 0:\r\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\r\n time.sleep(2)\r\n\r\n str_text = '\\n'.join(text)\r\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\r\n f.write(str_text)\r\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\r\n return str_text\r\n\r\n def generate(self, **kwargs):\r\n default_kwargs = {\r\n 'background_color': \"white\",\r\n 'width': 1000,\r\n 'height': 860,\r\n 'margin': 2,\r\n 'max_words': 50,\r\n 'stopwords': wordcloud.STOPWORDS,\r\n }\r\n if not self.mask is None:\r\n default_kwargs['mask'] = np.array(Image.open(self.mask))\r\n if not self.font_path is None:\r\n default_kwargs['font_path'] = self.font_path\r\n elif 'font_path' not in kwargs:\r\n raise ValueError('缺少参数 font_path')\r\n 
default_kwargs.update(kwargs)\r\n\r\n str_text = self.get_words_text()\r\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\r\n self.img_wordcloud = self.wordcloud.generate(str_text)\r\n\r\n def show_wordcloud(self): #生成词云图\r\n if self.img_wordcloud is None:\r\n self.generate()\r\n\r\n plt.axis('off')\r\n plt.imshow(self.img_wordcloud)\r\n plt.show()\r\n\r\n def to_file(self, filename): #保存到本地\r\n if not hasattr(self, 'wordcloud'):\r\n self.generate()\r\n self.wordcloud.to_file(filename)\r\n\r\ndef get_wordcloud(music_id,mask,font,path): #执行函数\r\n wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)\r\n wordcloud_obj.show_wordcloud()\r\n result=path+'\\\\'+'result.jpg'\r\n wordcloud_obj.to_file(result)\r\n\r\n\r\n",
"step-ids": [
17,
20,
21,
24,
33
]
}
|
[
17,
20,
21,
24,
33
] |
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
'''
ans = set()
n = len(nums)
for x, val in enumerate(nums):
for y in range(x + 1, n + 1):
ans.add(frozenset(nums[x:y]))
for u in range(0, x + 1):
for z in range(y + 1, n + 1):
ans.add(frozenset([nums[u]] + nums[y:z + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))
ans.add(frozenset([nums[u]] + nums[z:n + 1]))
ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))
ans.add(frozenset([]))
return ans
'''
all_subsets = [[]]
if nums:
for num in nums:
for idx in range(len(all_subsets)):
all_subsets.append(all_subsets[idx] + [num])
return all_subsets
|
normal
|
{
"blob_id": "7d873ed216355d1688ec79ff337304d8ebfd2754",
"index": 7625,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def subsets(self, nums: List[int]) ->List[List[int]]:\n \"\"\"\n ans = set()\n n = len(nums)\n for x, val in enumerate(nums):\n for y in range(x + 1, n + 1):\n ans.add(frozenset(nums[x:y]))\n for u in range(0, x + 1):\n for z in range(y + 1, n + 1):\n \n ans.add(frozenset([nums[u]] + nums[y:z + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))\n\n ans.add(frozenset([nums[u]] + nums[z:n + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))\n \n ans.add(frozenset([]))\n return ans\n \"\"\"\n all_subsets = [[]]\n if nums:\n for num in nums:\n for idx in range(len(all_subsets)):\n all_subsets.append(all_subsets[idx] + [num])\n return all_subsets\n",
"step-4": "class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n '''\n ans = set()\n n = len(nums)\n for x, val in enumerate(nums):\n for y in range(x + 1, n + 1):\n ans.add(frozenset(nums[x:y]))\n for u in range(0, x + 1):\n for z in range(y + 1, n + 1):\n \n ans.add(frozenset([nums[u]] + nums[y:z + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[y:z + 1]))\n\n ans.add(frozenset([nums[u]] + nums[z:n + 1]))\n ans.add(frozenset(nums[0:u + 1] + nums[z:n + 1]))\n \n ans.add(frozenset([]))\n return ans\n '''\n \n all_subsets = [[]]\n \n if nums:\n \n for num in nums:\n for idx in range(len(all_subsets)):\n \n all_subsets.append(all_subsets[idx] + [num])\n \n \n return all_subsets\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
from bs4 import BeautifulSoup
import json
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
url = 'http://api.tvmaze.com/singlesearch/shows?q=game+of+throne&embed=episodes'
response = requests.get(url, headers = headers)
content = json.loads(response.text)
x = json.dumps(content, ensure_ascii=False,indent =2)
with open('got_info.json','w') as f:
f.write(x)
# print(cele_info.split(' '))
|
normal
|
{
"blob_id": "d625e6724a3fe077a6f80b6de6b1f5bb0b95d47d",
"index": 4612,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('got_info.json', 'w') as f:\n f.write(x)\n",
"step-3": "<mask token>\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\nurl = (\n 'http://api.tvmaze.com/singlesearch/shows?q=game+of+throne&embed=episodes')\nresponse = requests.get(url, headers=headers)\ncontent = json.loads(response.text)\nx = json.dumps(content, ensure_ascii=False, indent=2)\nwith open('got_info.json', 'w') as f:\n f.write(x)\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport json\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\nurl = (\n 'http://api.tvmaze.com/singlesearch/shows?q=game+of+throne&embed=episodes')\nresponse = requests.get(url, headers=headers)\ncontent = json.loads(response.text)\nx = json.dumps(content, ensure_ascii=False, indent=2)\nwith open('got_info.json', 'w') as f:\n f.write(x)\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}\nurl = 'http://api.tvmaze.com/singlesearch/shows?q=game+of+throne&embed=episodes'\n\nresponse = requests.get(url, headers = headers)\n\ncontent = json.loads(response.text)\nx = json.dumps(content, ensure_ascii=False,indent =2)\nwith open('got_info.json','w') as f:\n\tf.write(x)\n\n# print(cele_info.split(' '))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve(n, seq):
flag = True
freq = defaultdict()
i = 1
p = len(seq)
j = 0
while j < p:
c = seq[j]
if i > n:
flag = False
break
if c in freq.keys():
if freq[c] == 1:
freq[c] = 0
i -= 1
else:
freq[c] = 1
i += 1
if c not in freq.keys():
freq[c] = 1
i += 1
j += 1
if flag == True:
return 0
else:
return 1
<|reserved_special_token_0|>
if solve(n, seq):
print('Satisfied')
else:
print('Not Satisfied')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve(n, seq):
flag = True
freq = defaultdict()
i = 1
p = len(seq)
j = 0
while j < p:
c = seq[j]
if i > n:
flag = False
break
if c in freq.keys():
if freq[c] == 1:
freq[c] = 0
i -= 1
else:
freq[c] = 1
i += 1
if c not in freq.keys():
freq[c] = 1
i += 1
j += 1
if flag == True:
return 0
else:
return 1
n = int(input())
seq = input()
if solve(n, seq):
print('Satisfied')
else:
print('Not Satisfied')
<|reserved_special_token_1|>
from collections import defaultdict
def solve(n, seq):
flag = True
freq = defaultdict()
i = 1
p = len(seq)
j = 0
while j < p:
c = seq[j]
if i > n:
flag = False
break
if c in freq.keys():
if freq[c] == 1:
freq[c] = 0
i -= 1
else:
freq[c] = 1
i += 1
if c not in freq.keys():
freq[c] = 1
i += 1
j += 1
if flag == True:
return 0
else:
return 1
n = int(input())
seq = input()
if solve(n, seq):
print('Satisfied')
else:
print('Not Satisfied')
<|reserved_special_token_1|>
from collections import defaultdict
def solve(n, seq):
flag = True
# slot = [0] * (n + 10)
freq = defaultdict()
# refer to next free slot
i = 1
p = len(seq)
j = 0
while j < p:
c = seq[j]
if i > n:
flag = False
break
if c in freq.keys():
if freq[c] == 1:
freq[c] = 0
i -= 1
else:
freq[c] = 1
i += 1
if c not in freq.keys():
freq[c] = 1
i += 1
j += 1
if flag == True:
return 0
else:
return 1
# number of computers
n = int(input())
seq = input()
if solve(n, seq):
print("Satisfied")
else:
print("Not Satisfied")
|
flexible
|
{
"blob_id": "89b03bb5ca86e426459e23866f86f8770e4a1613",
"index": 3420,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\n<mask token>\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-3": "<mask token>\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\nn = int(input())\nseq = input()\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-4": "from collections import defaultdict\n\n\ndef solve(n, seq):\n flag = True\n freq = defaultdict()\n i = 1\n p = len(seq)\n j = 0\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n if flag == True:\n return 0\n else:\n return 1\n\n\nn = int(input())\nseq = input()\nif solve(n, seq):\n print('Satisfied')\nelse:\n print('Not Satisfied')\n",
"step-5": "from collections import defaultdict\n\ndef solve(n, seq):\n flag = True\n # slot = [0] * (n + 10)\n freq = defaultdict()\n\n # refer to next free slot\n i = 1\n p = len(seq)\n j = 0\n\n while j < p:\n c = seq[j]\n if i > n:\n flag = False\n break\n if c in freq.keys():\n if freq[c] == 1:\n freq[c] = 0\n i -= 1\n else:\n freq[c] = 1\n i += 1\n if c not in freq.keys():\n freq[c] = 1\n i += 1\n j += 1\n\n if flag == True:\n return 0\n else:\n return 1 \n\n\n# number of computers\nn = int(input())\nseq = input()\n\nif solve(n, seq):\n print(\"Satisfied\")\nelse:\n print(\"Not Satisfied\")",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Preprocessing(instancia):
instancia = re.sub('#\\S+', '', instancia)
instancia = re.sub('@\\S+', '', instancia).lower().replace('.', ''
).replace(';', '').replace('-', '').replace(':', '').replace(')', ''
).replace('"', '').replace(',', '')
stopwords = set(nltk.corpus.stopwords.words('portuguese'))
palavras = [i for i in instancia.split() if not i in stopwords]
return ' '.join(palavras)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],
'entities': [], 'source': [], 'user': [], 'lang': []}
def Preprocessing(instancia):
instancia = re.sub('#\\S+', '', instancia)
instancia = re.sub('@\\S+', '', instancia).lower().replace('.', ''
).replace(';', '').replace('-', '').replace(':', '').replace(')', ''
).replace('"', '').replace(',', '')
stopwords = set(nltk.corpus.stopwords.words('portuguese'))
palavras = [i for i in instancia.split() if not i in stopwords]
return ' '.join(palavras)
<|reserved_special_token_1|>
import nltk
tw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],
'entities': [], 'source': [], 'user': [], 'lang': []}
def Preprocessing(instancia):
instancia = re.sub('#\\S+', '', instancia)
instancia = re.sub('@\\S+', '', instancia).lower().replace('.', ''
).replace(';', '').replace('-', '').replace(':', '').replace(')', ''
).replace('"', '').replace(',', '')
stopwords = set(nltk.corpus.stopwords.words('portuguese'))
palavras = [i for i in instancia.split() if not i in stopwords]
return ' '.join(palavras)
<|reserved_special_token_1|>
import nltk
tw_dict = {'created_at':[],
'id':[],
'id_str':[],
'full_text':[],
'entities':[],
'source':[],
'user':[],
'lang':[]}
def Preprocessing(instancia):
# Remove caracteres indesejados.
instancia = re.sub(r"#\S+", "", instancia)
instancia = re.sub(r"@\S+", "", instancia).lower().replace('.','').replace(';','').replace('-','').replace(':','').replace(')','').replace('"','').replace(',','')
# Removendo palavras e termos frequentes que não tem relevância nos dados.
stopwords = set(nltk.corpus.stopwords.words('portuguese'))
palavras = [i for i in instancia.split() if not i in stopwords]
return (" ".join(palavras))
|
flexible
|
{
"blob_id": "bffd211a2d2dc3dd9b596f69909be7f0437ab0c8",
"index": 9322,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n",
"step-3": "<mask token>\ntw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],\n 'entities': [], 'source': [], 'user': [], 'lang': []}\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n",
"step-4": "import nltk\ntw_dict = {'created_at': [], 'id': [], 'id_str': [], 'full_text': [],\n 'entities': [], 'source': [], 'user': [], 'lang': []}\n\n\ndef Preprocessing(instancia):\n instancia = re.sub('#\\\\S+', '', instancia)\n instancia = re.sub('@\\\\S+', '', instancia).lower().replace('.', ''\n ).replace(';', '').replace('-', '').replace(':', '').replace(')', ''\n ).replace('\"', '').replace(',', '')\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return ' '.join(palavras)\n",
"step-5": "import nltk\n\ntw_dict = {'created_at':[],\n 'id':[],\n 'id_str':[],\n 'full_text':[],\n 'entities':[],\n 'source':[],\n 'user':[],\n 'lang':[]}\n\ndef Preprocessing(instancia):\n # Remove caracteres indesejados.\n instancia = re.sub(r\"#\\S+\", \"\", instancia)\n instancia = re.sub(r\"@\\S+\", \"\", instancia).lower().replace('.','').replace(';','').replace('-','').replace(':','').replace(')','').replace('\"','').replace(',','')\n # Removendo palavras e termos frequentes que não tem relevância nos dados.\n stopwords = set(nltk.corpus.stopwords.words('portuguese'))\n palavras = [i for i in instancia.split() if not i in stopwords]\n return (\" \".join(palavras))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
<|reserved_special_token_0|>
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<|reserved_special_token_0|>
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<|reserved_special_token_0|>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
<|reserved_special_token_0|>
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<|reserved_special_token_0|>
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<|reserved_special_token_0|>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
def text_read(where, row, col):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace('\n', '')
result2 = line2.split(' ')
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
<|reserved_special_token_0|>
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
<|reserved_special_token_0|>
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.setrecursionlimit(100000)
dx = [1, 0, 0, -1]
dy = [0, 1, -1, 0]
class PriorityQueue:
pq = []
elements = {}
task = 0
def insert(self, priority, x_val, y_val):
entry = [priority, self.task, x_val, y_val]
self.elements[self.task] = entry
heapq.heappush(self.pq, entry)
self.task += 1
def delete(self, task):
entry = self.elements[task]
entry[-1] = None
def pop(self):
while self.pq:
priority, task, x_val, y_val = heapq.heappop(self.pq)
if task != None:
del self.elements[task]
return priority, x_val, y_val
raise KeyError('Pop from an empty Priority Queue')
def size(self):
return len(self.elements)
def text_write(where, out_list, ans, row, col):
f = open(where + '_output.txt', 'w')
for i in range(1, row + 1):
for j in range(1, col + 1):
data = '%d ' % out_list[i][j]
f.write(data)
f.write('\n')
f.write('---\n')
data2 = 'length = %d\n' % ans[0]
f.write(data2)
data3 = 'time = %d' % ans[1]
f.write(data3)
f.close()
def text_info(where):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
line = line.replace('\n', '')
result = line.split(' ')
a = [int(result[0]), int(result[1]), int(result[2])]
return a
def text_read(where, row, col):
f = open('./input/' + where + '.txt', 'r')
line = f.readline()
list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]
a = 1
line2 = f.readline()
while line2:
line2 = line2.replace('\n', '')
result2 = line2.split(' ')
for v in range(col):
list1[a][v + 1] = int(result2[v])
line2 = f.readline()
a += 1
f.close()
return list1
def position_check(pos, out_list, row, col):
for r in range(1, row + 1):
for c in range(1, col + 1):
if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c
] == 4:
pos.append([r, c])
return pos
def bfs(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
q = queue.Queue()
q.put(start)
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
ans[1] += 1
while not q.empty():
if visit[end[0]][end[1]] == 1:
break
cur_task = q.get()
x = cur_task[0]
y = cur_task[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
q.put([nx, ny])
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def IDS(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
find = [0]
limit = 0
while find[0] != 1:
limit += 1
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
des[start[0]][start[1]] = 0
visit[start[0]][start[1]] = 1
dfs(start, end, out_list, row, col, ans, limit, des, visit, find)
ans[0] += limit
num = limit
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
if visit[end[0]][end[1]] == 1:
find[0] = 1
return
x = start[0]
y = start[1]
for k in range(4):
nx = x + dx[k]
ny = y + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny
] != 1:
if visit[nx][ny] != 1:
if des[x][y] + 1 <= limit:
visit[nx][ny] = 1
des[nx][ny] = des[x][y] + 1
next_start = [nx, ny]
ans[1] += 1
dfs(next_start, end, out_list, row, col, ans, limit,
des, visit, find)
def astar(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def greedy(start, end, out_list, row, col, ans):
des = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]
visit[start[0]][start[1]] = 1
des[start[0]][start[1]] = 0
pq2 = PriorityQueue()
while pq2.size() != 0:
pq2.pop()
manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])
pq2.insert(manhattan_d, start[0], start[1])
while pq2.size() != 0:
if visit[end[0]][end[1]] == 1:
break
priority, x_val, y_val = pq2.pop()
for k in range(4):
nx = x_val + dx[k]
ny = y_val + dy[k]
if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx
][ny] != 1:
if visit[nx][ny] != 1:
visit[nx][ny] = 1
des[nx][ny] = des[x_val][y_val] + 1
d = abs(nx - end[0]) + abs(ny - end[1])
pq2.insert(d, nx, ny)
ans[1] += 1
ans[0] += des[end[0]][end[1]]
num = des[end[0]][end[1]]
target = [end[0], end[1]]
for n in range(num, 0, -1):
tx = target[0]
ty = target[1]
out_list[tx][ty] = 5
for k in range(4):
ntx = tx + dx[k]
nty = ty + dy[k]
if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and
out_list[ntx][nty] != 1):
if des[ntx][nty] == n - 1:
target = [ntx, nty]
return out_list
def test_floor():
where = 'test1'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def fifth_floor():
where = 'fifth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def forth_floor():
where = 'fourth_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def third_floor():
where = 'third_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def second_floor():
where = 'second_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
def first_floor():
where = 'first_floor'
info = text_info(where)
row = info[1]
col = info[2]
out_list = text_read(where, row, col)
pos = []
pos = position_check(pos, out_list, row, col)
deepcopy_copy1 = copy.deepcopy(out_list)
deepcopy_copy2 = copy.deepcopy(out_list)
ans = [0, 0]
path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)
path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)
for i in range(1, row):
for j in range(1, col + 1):
if path1[i][j] == 5 or path2[i][j] == 5:
out_list[i][j] = 5
text_write(where, out_list, ans, row, col)
fifth_floor()
forth_floor()
third_floor()
second_floor()
first_floor()
<|reserved_special_token_1|>
import queue
import copy
import heapq
import sys
sys.setrecursionlimit(100000)
dx =[1,0,0,-1]
dy=[0,1,-1,0]
class PriorityQueue:
    """Min-heap priority queue of (x, y) cells keyed by priority.

    Each entry carries a unique, increasing task id used both as a
    comparison tie-breaker and for lazy deletion: delete() drops the id
    from `elements`, and pop() skips heap entries whose id is no longer
    live.
    """

    def __init__(self):
        # Per-instance state. The original stored these as shared class
        # attributes, so every instance saw the same heap and callers
        # had to drain stale entries by hand before reuse.
        self.pq = []        # the heap of [priority, task, x, y] entries
        self.elements = {}  # task id -> live entry
        self.task = 0       # next unique tie-breaker id

    def insert(self, priority, x_val, y_val):
        """Push cell (x_val, y_val) with the given priority."""
        entry = [priority, self.task, x_val, y_val]
        self.elements[self.task] = entry
        heapq.heappush(self.pq, entry)
        self.task += 1

    def delete(self, task):
        """Lazily remove the entry with the given task id.

        Fix: the original set entry[-1] = None but pop() tested the task
        field, so deleted entries were still returned. Dropping the id
        from `elements` lets pop() skip them reliably.
        """
        del self.elements[task]

    def pop(self):
        """Pop and return (priority, x_val, y_val) of the smallest live entry.

        Raises KeyError when no live entries remain.
        """
        while self.pq:
            priority, task, x_val, y_val = heapq.heappop(self.pq)
            if task in self.elements:
                del self.elements[task]
                return priority, x_val, y_val
        raise KeyError('Pop from an empty Priority Queue')

    def size(self):
        """Return the number of live (not lazily deleted) entries."""
        return len(self.elements)
def text_write(where, out_list, ans, row, col):
    """Write the solved grid and search stats to <where>_output.txt.

    Emits the 1-indexed row x col grid (one space-separated row per
    line), a '---' separator, then the path length (ans[0]) and visited
    count (ans[1]).
    """
    # `with` guarantees the file is closed even if a write fails
    # (the original used a bare open()/close() pair).
    with open(where + "_output.txt", 'w') as f:
        for i in range(1, row + 1):
            for j in range(1, col + 1):
                f.write("%d " % out_list[i][j])
            f.write("\n")
        f.write("---\n")
        f.write("length = %d\n" % ans[0])
        f.write("time = %d" % ans[1])
def text_info(where):
    """Read the header line of ./input/<where>.txt.

    The header holds three space-separated integers; returns them as a
    list [a, row, col] (callers use indices 1 and 2 as the grid size).
    """
    # `with` closes the handle; the original leaked the open file.
    with open("./input/" + where + ".txt", 'r') as f:
        fields = f.readline().replace("\n", "").split(" ")
    return [int(fields[0]), int(fields[1]), int(fields[2])]
def text_read(where, row, col):
    """Load the maze grid from ./input/<where>.txt.

    Skips the header line, then parses each subsequent line as a row of
    space-separated integers. Returns a (row+1) x (col+1) list-of-lists
    so cells can be addressed 1-indexed; row/column 0 stay zero.
    """
    grid = [[0] * (col + 1) for _ in range(row + 1)]
    with open("./input/" + where + ".txt", 'r') as handle:
        handle.readline()  # discard the "a row col" header
        r = 1
        for raw in handle:
            cells = raw.replace("\n", "").split(" ")
            for c in range(col):
                grid[r][c + 1] = int(cells[c])
            r += 1
    return grid
def position_check(pos, out_list, row, col):
    """Append the [r, c] coordinates of every marker cell to `pos`.

    Marker cells are those holding 3, 6 or 4; the grid is scanned in
    row-major order over the 1-indexed area, so `pos` ends up ordered by
    position. Returns the same list that was passed in.
    """
    for r in range(1, row + 1):
        pos.extend(
            [r, c] for c in range(1, col + 1) if out_list[r][c] in (3, 6, 4)
        )
    return pos
def bfs(start, end, out_list, row, col, ans):
    """Breadth-first search from `start` to `end` on the 1-indexed grid.

    Cells equal to 1 are walls. `des` records hop distance from the
    start; after the search the path is reconstructed backwards from
    `end` by following strictly decreasing distances, marking each path
    cell with 5 in `out_list` (the start cell itself is left unmarked).
    ans[0] accumulates the path length, ans[1] the number of cells
    enqueued. Returns the mutated `out_list`.
    """
    des = [[0] * (col + 1) for _ in range(row + 1)]
    visit = [[0] * (col + 1) for _ in range(row + 1)]
    frontier = queue.Queue()
    frontier.put(start)
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
    ans[1] += 1  # count the start cell
    while not frontier.empty():
        if visit[end[0]][end[1]] == 1:
            break  # goal reached; distances up to `end` are final
        cx, cy = frontier.get()
        for step in range(4):
            nx, ny = cx + dx[step], cy + dy[step]
            if 1 <= nx <= row and 1 <= ny <= col and out_list[nx][ny] != 1:
                if visit[nx][ny] != 1:
                    visit[nx][ny] = 1
                    des[nx][ny] = des[cx][cy] + 1
                    frontier.put([nx, ny])
                    ans[1] += 1
    ans[0] += des[end[0]][end[1]]
    # Walk back from the goal: at depth n, hop to a neighbour at depth n-1.
    target = [end[0], end[1]]
    for depth in range(des[end[0]][end[1]], 0, -1):
        tx, ty = target
        out_list[tx][ty] = 5
        for step in range(4):
            ntx, nty = tx + dx[step], ty + dy[step]
            if 1 <= ntx <= row and 1 <= nty <= col and out_list[ntx][nty] != 1:
                if des[ntx][nty] == depth - 1:
                    target = [ntx, nty]
    return out_list
def IDS(start, end, out_list, row, col, ans):
    """Iterative-deepening search from `start` to `end`.

    Repeats a depth-limited DFS with limit 1, 2, 3, ... until a pass
    reaches `end` (find[0] set by dfs). `visit` is reset each pass;
    `des` persists across passes and each pass rewrites distances for
    the cells it explores. The path is then reconstructed backwards
    from `end` and marked with 5; ans[0] accumulates the final limit.
    Returns the mutated `out_list`.
    """
    des = [[0] * (col + 1) for _ in range(row + 1)]
    found = [0]
    limit = 0
    while found[0] != 1:
        limit += 1
        visit = [[0] * (col + 1) for _ in range(row + 1)]
        des[start[0]][start[1]] = 0
        visit[start[0]][start[1]] = 1
        dfs(start, end, out_list, row, col, ans, limit, des, visit, found)
    ans[0] += limit
    # Backtrace: at depth n, hop to any neighbour recorded at depth n-1.
    target = [end[0], end[1]]
    for depth in range(limit, 0, -1):
        tx, ty = target
        out_list[tx][ty] = 5
        for step in range(4):
            ntx, nty = tx + dx[step], ty + dy[step]
            if 1 <= ntx <= row and 1 <= nty <= col and out_list[ntx][nty] != 1:
                if des[ntx][nty] == depth - 1:
                    target = [ntx, nty]
    return out_list
def dfs(start, end, out_list, row, col, ans, limit, des, visit, find):
    """Depth-limited DFS helper for IDS.

    Recurses from `start`, never deeper than `limit` hops, skipping
    walls (value 1) and already-visited cells. Sets find[0] = 1 once
    `end` has been visited, which stops further expansion up the call
    stack. ans[1] counts every cell visited.
    """
    if visit[end[0]][end[1]] == 1:
        find[0] = 1
        return
    cx, cy = start
    for step in range(4):
        nx, ny = cx + dx[step], cy + dy[step]
        if not (1 <= nx <= row and 1 <= ny <= col) or out_list[nx][ny] == 1:
            continue
        if visit[nx][ny] == 1 or des[cx][cy] + 1 > limit:
            continue
        visit[nx][ny] = 1
        des[nx][ny] = des[cx][cy] + 1
        ans[1] += 1
        dfs([nx, ny], end, out_list, row, col, ans, limit, des, visit, find)
def astar(start, end, out_list, row, col, ans):
    """A* search from `start` to `end` with a Manhattan-distance heuristic.

    Priority is f = g + h: `des` (hops from start) plus the Manhattan
    distance to `end`. Walls are cells equal to 1. The path is
    reconstructed backwards from `end` and marked with 5 in `out_list`;
    ans[0] accumulates the path length, ans[1] the cells visited.
    Returns the mutated `out_list`.
    """
    des = [[0] * (col + 1) for _ in range(row + 1)]
    visit = [[0] * (col + 1) for _ in range(row + 1)]
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
    open_set = PriorityQueue()
    # Defensive drain: PriorityQueue keeps its state in class attributes,
    # so entries from an earlier search could still be queued.
    while open_set.size() != 0:
        open_set.pop()
    open_set.insert(abs(start[0] - end[0]) + abs(start[1] - end[1]),
                    start[0], start[1])
    while open_set.size() != 0:
        if visit[end[0]][end[1]] == 1:
            break
        _, cx, cy = open_set.pop()
        for step in range(4):
            nx, ny = cx + dx[step], cy + dy[step]
            if 1 <= nx <= row and 1 <= ny <= col and out_list[nx][ny] != 1:
                if visit[nx][ny] != 1:
                    visit[nx][ny] = 1
                    des[nx][ny] = des[cx][cy] + 1
                    f_cost = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]
                    open_set.insert(f_cost, nx, ny)
                    ans[1] += 1
    ans[0] += des[end[0]][end[1]]
    # Backtrace: at depth n, hop to a neighbour recorded at depth n-1.
    target = [end[0], end[1]]
    for depth in range(des[end[0]][end[1]], 0, -1):
        tx, ty = target
        out_list[tx][ty] = 5
        for step in range(4):
            ntx, nty = tx + dx[step], ty + dy[step]
            if 1 <= ntx <= row and 1 <= nty <= col and out_list[ntx][nty] != 1:
                if des[ntx][nty] == depth - 1:
                    target = [ntx, nty]
    return out_list
def greedy(start, end, out_list, row, col, ans):
    """Greedy best-first search from `start` to `end`.

    Priority is the Manhattan distance to `end` alone (no path cost, so
    unlike A* the result is not guaranteed shortest). Walls are cells
    equal to 1. The path is reconstructed backwards from `end` via the
    recorded `des` depths and marked with 5; ans[0] accumulates the
    recorded path length, ans[1] the cells visited. Returns the
    mutated `out_list`.
    """
    des = [[0] * (col + 1) for _ in range(row + 1)]
    visit = [[0] * (col + 1) for _ in range(row + 1)]
    visit[start[0]][start[1]] = 1
    des[start[0]][start[1]] = 0
    open_set = PriorityQueue()
    # Defensive drain: PriorityQueue keeps its state in class attributes,
    # so entries from an earlier search could still be queued.
    while open_set.size() != 0:
        open_set.pop()
    open_set.insert(abs(start[0] - end[0]) + abs(start[1] - end[1]),
                    start[0], start[1])
    while open_set.size() != 0:
        if visit[end[0]][end[1]] == 1:
            break
        _, cx, cy = open_set.pop()
        for step in range(4):
            nx, ny = cx + dx[step], cy + dy[step]
            if 1 <= nx <= row and 1 <= ny <= col and out_list[nx][ny] != 1:
                if visit[nx][ny] != 1:
                    visit[nx][ny] = 1
                    des[nx][ny] = des[cx][cy] + 1
                    h_cost = abs(nx - end[0]) + abs(ny - end[1])
                    open_set.insert(h_cost, nx, ny)
                    ans[1] += 1
    ans[0] += des[end[0]][end[1]]
    # Backtrace: at depth n, hop to a neighbour recorded at depth n-1.
    target = [end[0], end[1]]
    for depth in range(des[end[0]][end[1]], 0, -1):
        tx, ty = target
        out_list[tx][ty] = 5
        for step in range(4):
            ntx, nty = tx + dx[step], ty + dy[step]
            if 1 <= ntx <= row and 1 <= nty <= col and out_list[ntx][nty] != 1:
                if des[ntx][nty] == depth - 1:
                    target = [ntx, nty]
    return out_list
def test_floor():
    """Solve the test maze (input/test1.txt) and write test1_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    """
    where = "test1"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
def fifth_floor():
    """Solve the fifth-floor maze and write fifth_floor_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    """
    where = "fifth_floor"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
def forth_floor():
    """Solve the fourth-floor maze and write fourth_floor_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    NOTE: the misspelled name "forth_floor" is kept for the callers.
    """
    where = "fourth_floor"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
def third_floor():
    """Solve the third-floor maze and write third_floor_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    """
    where = "third_floor"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
def second_floor():
    """Solve the second-floor maze and write second_floor_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    """
    where = "second_floor"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
def first_floor():
    """Solve the first-floor maze and write first_floor_output.txt.

    Routes between the three marked cells (values 3/4/6, in scan order)
    with greedy best-first search; bfs/IDS/astar are drop-in
    alternatives with the same signature.
    """
    where = "first_floor"
    info = text_info(where)
    row = info[1]
    col = info[2]
    out_list = text_read(where, row, col)
    pos = position_check([], out_list, row, col)
    grid_a = copy.deepcopy(out_list)
    grid_b = copy.deepcopy(out_list)
    ans = [0, 0]  # [accumulated path length, visited-cell count]
    path1 = greedy(pos[0], pos[1], grid_a, row, col, ans)
    path2 = greedy(pos[1], pos[2], grid_b, row, col, ans)
    # Merge both path segments into the display grid.
    # Fix: was range(1, row), which skipped the last row of the maze.
    for i in range(1, row + 1):
        for j in range(1, col + 1):
            if path1[i][j] == 5 or path2[i][j] == 5:
                out_list[i][j] = 5
    text_write(where, out_list, ans, row, col)
# Script entry point: solve every floor in turn.  Each call reads
# ./input/<floor>.txt and writes <floor>_output.txt as a side effect.
# NOTE(review): these run at import time; consider a __main__ guard.
#test_floor()
fifth_floor()
forth_floor()
third_floor()
second_floor()
first_floor()
|
flexible
|
{
"blob_id": "6192099bdecffd9ce3576f4034567478145115a0",
"index": 1291,
"step-1": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\n<mask token>\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = 
PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in 
range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\n<mask token>\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty 
= ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c 
in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = 
copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\ndef text_read(where, row, col):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace('\\n', '')\n result2 = line2.split(' ')\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\n<mask token>\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n 
visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], 
end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\n<mask token>\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i 
in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.setrecursionlimit(100000)\ndx = [1, 0, 0, -1]\ndy = [0, 1, -1, 0]\n\n\nclass PriorityQueue:\n pq = []\n elements = {}\n task = 0\n\n def insert(self, priority, x_val, y_val):\n entry = [priority, self.task, x_val, y_val]\n self.elements[self.task] = entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self, task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val, y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val, y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\ndef text_write(where, out_list, ans, row, col):\n f = open(where + '_output.txt', 'w')\n for i in range(1, row + 1):\n for j in range(1, col + 1):\n data = '%d ' % out_list[i][j]\n f.write(data)\n f.write('\\n')\n f.write('---\\n')\n data2 = 'length = %d\\n' % ans[0]\n f.write(data2)\n data3 = 'time = %d' % ans[1]\n f.write(data3)\n f.close()\n\n\ndef text_info(where):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n line = line.replace('\\n', '')\n result = line.split(' ')\n a = [int(result[0]), int(result[1]), int(result[2])]\n return a\n\n\ndef text_read(where, row, col):\n f = open('./input/' + where + '.txt', 'r')\n line = f.readline()\n list1 = [[(0) for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace('\\n', '')\n result2 = line2.split(' ')\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\n\ndef position_check(pos, out_list, row, col):\n for r in range(1, row + 1):\n for c in range(1, col + 1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c\n ] == 4:\n pos.append([r, c])\n return pos\n\n\ndef bfs(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = 
[[(0) for c in range(col + 1)] for r in range(row + 1)]\n q = queue.Queue()\n q.put(start)\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n ans[1] += 1\n while not q.empty():\n if visit[end[0]][end[1]] == 1:\n break\n cur_task = q.get()\n x = cur_task[0]\n y = cur_task[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n q.put([nx, ny])\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef IDS(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n find = [0]\n limit = 0\n while find[0] != 1:\n limit += 1\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0\n visit[start[0]][start[1]] = 1\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n ans[0] += limit\n num = limit\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef dfs(start, end, out_list, row, col, ans, limit, des, visit, find):\n if visit[end[0]][end[1]] == 1:\n find[0] = 1\n return\n x = start[0]\n y = start[1]\n for k in range(4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny\n ] != 1:\n if visit[nx][ny] != 1:\n if 
des[x][y] + 1 <= limit:\n visit[nx][ny] = 1\n des[nx][ny] = des[x][y] + 1\n next_start = [nx, ny]\n ans[1] += 1\n dfs(next_start, end, out_list, row, col, ans, limit,\n des, visit, find)\n\n\ndef astar(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1]) + des[nx][ny]\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef greedy(start, end, out_list, row, col, ans):\n des = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit = [[(0) for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1\n des[start[0]][start[1]] = 0\n pq2 = PriorityQueue()\n while pq2.size() != 0:\n pq2.pop()\n manhattan_d = abs(start[0] - end[0]) + abs(start[1] - end[1])\n pq2.insert(manhattan_d, start[0], start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + 
dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx\n ][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] = 1\n des[nx][ny] = des[x_val][y_val] + 1\n d = abs(nx - end[0]) + abs(ny - end[1])\n pq2.insert(d, nx, ny)\n ans[1] += 1\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if (ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and \n out_list[ntx][nty] != 1):\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n return out_list\n\n\ndef test_floor():\n where = 'test1'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef fifth_floor():\n where = 'fifth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef forth_floor():\n where = 'fourth_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n 
pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = 'third_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef second_floor():\n where = 'second_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\ndef first_floor():\n where = 'first_floor'\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, 
col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n text_write(where, out_list, ans, row, col)\n\n\nfifth_floor()\nforth_floor()\nthird_floor()\nsecond_floor()\nfirst_floor()\n",
"step-5": "import queue\nimport copy\nimport heapq\nimport sys\nsys.setrecursionlimit(100000)\n\ndx =[1,0,0,-1]\ndy=[0,1,-1,0]\n\nclass PriorityQueue:\n pq=[]\n elements={}\n task=0\n\n def insert(self , priority,x_val,y_val):\n entry = [priority, self.task,x_val,y_val]\n self.elements[self.task]=entry\n heapq.heappush(self.pq, entry)\n self.task += 1\n\n def delete(self,task):\n entry = self.elements[task]\n entry[-1] = None\n\n def pop(self):\n while self.pq:\n priority, task, x_val , y_val = heapq.heappop(self.pq)\n if task != None:\n del self.elements[task]\n return priority, x_val , y_val\n raise KeyError('Pop from an empty Priority Queue')\n\n def size(self):\n return len(self.elements)\n\n\n\ndef text_write(where , out_list,ans,row,col):\n f = open( where + \"_output.txt\", 'w')\n\n for i in range(1,row+1):\n for j in range(1,col+1):\n data =\"%d \" %out_list[i][j]\n f.write(data)\n f.write(\"\\n\")\n f.write(\"---\\n\")\n data2 = \"length = %d\\n\" %ans[0]\n f.write(data2)\n data3 = \"time = %d\" %ans[1]\n f.write(data3)\n f.close()\n\n\n\ndef text_info(where):\n f = open(\"./input/\" + where+\".txt\" , 'r')\n line = f.readline()\n line = line.replace(\"\\n\", \"\")\n result = line.split(\" \")\n a=[int(result[0]),int(result[1]),int(result[2])]\n return a\n\ndef text_read(where,row,col):\n f = open(\"./input/\"+where+\".txt\", 'r')\n line = f.readline()\n list1 = [[0 for cols in range(col + 1)] for rows in range(row + 1)]\n a = 1\n line2 = f.readline()\n while line2:\n line2 = line2.replace(\"\\n\", \"\")\n result2 = line2.split(\" \")\n for v in range(col):\n list1[a][v + 1] = int(result2[v])\n line2 = f.readline()\n a += 1\n f.close()\n return list1\n\ndef position_check(pos , out_list , row , col):\n for r in range(1,row+1):\n for c in range(1,col+1):\n if out_list[r][c] == 3 or out_list[r][c] == 6 or out_list[r][c] == 4:\n pos.append([r,c])\n return pos\n\ndef bfs(start ,end, out_list , row , col , ans):\n des = [[0 for c in range(col+1)] for r in 
range(row+1)]\n visit = [[0 for c in range(col+1)] for r in range(row+1)]\n q = queue.Queue()\n q.put(start)\n visit[start[0]][start[1]]=1;\n des[start[0]][start[1]]=0;\n ans[1] +=1\n while not q.empty():\n if visit[end[0]][end[1]] ==1:\n break\n cur_task = q.get()\n x=cur_task[0]\n y=cur_task[1]\n for k in range (4):\n nx = x + dx[k]\n ny = y + dy[k]\n if nx >= 1 and nx <=row and ny >=1 and ny<=col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny] =1\n des[nx][ny] = des[x][y] +1\n q.put([nx,ny])\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0],end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\ndef IDS(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n find=[0]\n limit = 0\n while find[0] != 1:\n limit +=1\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n des[start[0]][start[1]] = 0;\n visit[start[0]][start[1]] = 1\n\n dfs(start, end, out_list, row, col, ans, limit, des, visit, find)\n\n ans[0] += limit\n num=limit\n target = [end[0],end[1]]\n\n for n in range(num, 0, -1):\n tx = target[0]\n ty = target[1]\n out_list[tx][ty] = 5\n for k in range(4):\n ntx = tx + dx[k]\n nty = ty + dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n - 1:\n target = [ntx, nty]\n\n return out_list\n\ndef dfs(start , end , out_list , row , col ,ans , limit,des,visit,find):\n if visit[end[0]][end[1]] == 1:\n find[0]=1\n return\n x=start[0]\n y=start[1]\n for k in range(4):\n nx = x+dx[k]\n ny=y+dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n if des[x][y]+1 <=limit:\n visit[nx][ny]=1\n 
des[nx][ny] = des[x][y]+1\n next_start=[nx,ny]\n ans[1]+=1\n dfs(next_start , end , out_list , row , col , ans , limit, des , visit,find)\n\ndef astar(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1;\n des[start[0]][start[1]] = 0;\n\n pq2 = PriorityQueue()\n while pq2.size() !=0:\n pq2.pop()\n manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])\n pq2.insert(manhattan_d,start[0],start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if visit[nx][ny] != 1:\n visit[nx][ny]=1\n des[nx][ny]=des[x_val][y_val]+1\n d=abs(nx-end[0])+abs(ny-end[1])+des[nx][ny]\n pq2.insert(d,nx,ny)\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\ndef greedy(start , end , out_list , row , col , ans):\n des = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit = [[0 for c in range(col + 1)] for r in range(row + 1)]\n visit[start[0]][start[1]] = 1;\n des[start[0]][start[1]] = 0;\n\n pq2 = PriorityQueue()\n while pq2.size() !=0:\n pq2.pop()\n manhattan_d = abs(start[0]-end[0])+abs(start[1]-end[1])\n pq2.insert(manhattan_d,start[0],start[1])\n while pq2.size() != 0:\n if visit[end[0]][end[1]] == 1:\n break\n priority, x_val, y_val = pq2.pop()\n for k in range(4):\n nx = x_val + dx[k]\n ny = y_val + dy[k]\n if nx >= 1 and nx <= row and ny >= 1 and ny <= col and out_list[nx][ny] != 1:\n if 
visit[nx][ny] != 1:\n visit[nx][ny]=1\n des[nx][ny]=des[x_val][y_val]+1\n d=abs(nx-end[0])+abs(ny-end[1])\n pq2.insert(d,nx,ny)\n ans[1] += 1\n\n ans[0] += des[end[0]][end[1]]\n num = des[end[0]][end[1]]\n target = [end[0], end[1]]\n\n for n in range(num,0,-1):\n tx=target[0]\n ty=target[1]\n out_list[tx][ty]=5\n for k in range(4):\n ntx=tx+dx[k]\n nty=ty+dy[k]\n if ntx >= 1 and ntx <= row and nty >= 1 and nty <= col and out_list[ntx][nty] != 1:\n if des[ntx][nty] == n-1:\n target=[ntx,nty]\n return out_list\n\n\ndef test_floor():\n where = \"test1\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where , row , col)\n pos = []\n pos = position_check(pos,out_list, row , col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans=[0,0]\n\n #path1=bfs(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n #path2=bfs(pos[1],pos[2],deepcopy_copy2,row,col,ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0],pos[1],deepcopy_copy1,row,col,ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef fifth_floor():\n where = \"fifth_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], 
deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef forth_floor():\n where = \"fourth_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 안하면... 
뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n\ndef third_floor():\n where = \"third_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 안하면... 
뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\ndef second_floor():\n where = \"second_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1):\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\ndef first_floor():\n where = \"first_floor\"\n info = text_info(where)\n row = info[1]\n col = info[2]\n out_list = text_read(where, row, col)\n pos = []\n pos = position_check(pos, out_list, row, col)\n deepcopy_copy1 = copy.deepcopy(out_list)\n deepcopy_copy2 = copy.deepcopy(out_list)\n ans = [0, 0]\n\n #path1 = bfs(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = bfs(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = IDS(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = IDS(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n #path1 = astar(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n #path2 = astar(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n path1 = greedy(pos[0], pos[1], deepcopy_copy1, row, col, ans)\n path2 = greedy(pos[1], pos[2], deepcopy_copy2, row, col, ans)\n\n for i in range(1, row):\n for j in range(1, col + 1): # col 한개 
안하면... 뭔가 될듯 ㅋㅋ\n if path1[i][j] == 5 or path2[i][j] == 5:\n out_list[i][j] = 5\n\n text_write(where, out_list, ans, row, col)\n\n#test_floor()\nfifth_floor()\nforth_floor()\nthird_floor()\nsecond_floor()\nfirst_floor()\n\n",
"step-ids": [
12,
17,
18,
23,
25
]
}
|
[
12,
17,
18,
23,
25
] |
import json
# numbers=[2,3,5,7,11,13]
filename='numbers.json'
with open(filename) as f:
numbers=json.load(f)
print(numbers)
|
normal
|
{
"blob_id": "8da775bd87bfeab5e30956e62bcdba6c04e26b27",
"index": 6720,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(filename) as f:\n numbers = json.load(f)\nprint(numbers)\n",
"step-3": "<mask token>\nfilename = 'numbers.json'\nwith open(filename) as f:\n numbers = json.load(f)\nprint(numbers)\n",
"step-4": "import json\nfilename = 'numbers.json'\nwith open(filename) as f:\n numbers = json.load(f)\nprint(numbers)\n",
"step-5": "import json\n\n# numbers=[2,3,5,7,11,13]\n\nfilename='numbers.json'\n\nwith open(filename) as f:\n numbers=json.load(f)\n\nprint(numbers)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@udf(returnType=BooleanType())
def filter_host(item):
for i in filter_hosts:
if item.find(i) != -1:
return False
return True
<|reserved_special_token_0|>
@udf(returnType=BooleanType())
def contains_host(item):
for i in contains_hosts:
if item.find(i) != -1:
return True
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myconf.setAppName('test').setMaster('local[40]')
myconf.set('spark.executor.instances', '40')
myconf.set('spark.driver.memory', '6G')
myconf.set('spark.executor.cores', '40')
myconf.set('spark.task.cpus', '40')
myconf.set('spark.jars.packages',
'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')
<|reserved_special_token_0|>
logger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)
<|reserved_special_token_0|>
@udf(returnType=BooleanType())
def filter_host(item):
for i in filter_hosts:
if item.find(i) != -1:
return False
return True
<|reserved_special_token_0|>
@udf(returnType=BooleanType())
def contains_host(item):
for i in contains_hosts:
if item.find(i) != -1:
return True
return False
<|reserved_special_token_0|>
hosts.sort()
<|reserved_special_token_0|>
df2.to_csv('tf22.csv')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
price_pattern = re.compile('^\\d+\\.\\d\\d$')
myconf = SparkConf()
myconf.setAppName('test').setMaster('local[40]')
myconf.set('spark.executor.instances', '40')
myconf.set('spark.driver.memory', '6G')
myconf.set('spark.executor.cores', '40')
myconf.set('spark.task.cpus', '40')
myconf.set('spark.jars.packages',
'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')
spark = SparkSession.builder.config(conf=myconf).getOrCreate()
logger = spark._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)
filter_hosts = ['vivo', 'google.com', 'google.cn', 'oppomobile',
'baidu.com', 'hicloud']
@udf(returnType=BooleanType())
def filter_host(item):
for i in filter_hosts:
if item.find(i) != -1:
return False
return True
contains_hosts = ['jd.com']
@udf(returnType=BooleanType())
def contains_host(item):
for i in contains_hosts:
if item.find(i) != -1:
return True
return False
df = spark.read.format('mongo').option('uri',
'mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420').option(
'spark.mongodb.input.partitioner', 'MongoSplitVectorPartitioner').load()
df = df.filter(filter_host('host')).select(['app_id', 'host', 'session_id'])
hosts = df.select(['host']).distinct().rdd.map(lambda r: r['host']).collect()
hosts.sort()
df1 = df.groupBy('app_id', 'session_id').pivot('host', hosts).agg(F.count(
'host')).fillna(0)
df2 = df1.toPandas()
df2.to_csv('tf22.csv')
<|reserved_special_token_1|>
import tensorflow
from pyspark.sql.functions import split
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.functions import udf, array
import re
from pyspark.sql.types import *
import pyspark.sql.functions as F
price_pattern = re.compile('^\\d+\\.\\d\\d$')
myconf = SparkConf()
myconf.setAppName('test').setMaster('local[40]')
myconf.set('spark.executor.instances', '40')
myconf.set('spark.driver.memory', '6G')
myconf.set('spark.executor.cores', '40')
myconf.set('spark.task.cpus', '40')
myconf.set('spark.jars.packages',
'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')
spark = SparkSession.builder.config(conf=myconf).getOrCreate()
logger = spark._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)
filter_hosts = ['vivo', 'google.com', 'google.cn', 'oppomobile',
'baidu.com', 'hicloud']
@udf(returnType=BooleanType())
def filter_host(item):
for i in filter_hosts:
if item.find(i) != -1:
return False
return True
contains_hosts = ['jd.com']
@udf(returnType=BooleanType())
def contains_host(item):
for i in contains_hosts:
if item.find(i) != -1:
return True
return False
df = spark.read.format('mongo').option('uri',
'mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420').option(
'spark.mongodb.input.partitioner', 'MongoSplitVectorPartitioner').load()
df = df.filter(filter_host('host')).select(['app_id', 'host', 'session_id'])
hosts = df.select(['host']).distinct().rdd.map(lambda r: r['host']).collect()
hosts.sort()
df1 = df.groupBy('app_id', 'session_id').pivot('host', hosts).agg(F.count(
'host')).fillna(0)
df2 = df1.toPandas()
df2.to_csv('tf22.csv')
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow
from pyspark.sql.functions import split
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark.sql.functions import udf, array
import re
from pyspark.sql.types import *
import pyspark.sql.functions as F
price_pattern = re.compile(r'^\d+\.\d\d$')
myconf = SparkConf()
myconf.setAppName("test").setMaster("local[40]")
myconf.set('spark.executor.instances','40')
myconf.set('spark.driver.memory','6G')
#myconf.set('spark.executor.memory','1G')
myconf.set('spark.executor.cores','40')
myconf.set('spark.task.cpus','40')
# 指定连接器对应的spark-package
myconf.set("spark.jars.packages","org.mongodb.spark:mongo-spark-connector_2.11:2.4.1")
spark = SparkSession.builder.config(conf=myconf).getOrCreate()
logger = spark._jvm.org.apache.log4j
logger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)
filter_hosts=["vivo","google.com","google.cn","oppomobile","baidu.com","hicloud"]
@udf(returnType=BooleanType())
def filter_host(item):
for i in filter_hosts:
if item.find(i) != -1:
return False
return True
contains_hosts=["jd.com"]
@udf(returnType=BooleanType())
def contains_host(item):
for i in contains_hosts:
if item.find(i) != -1:
return True
return False
df=spark.read.format("mongo").option("uri","mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420").option("spark.mongodb.input.partitioner","MongoSplitVectorPartitioner").load()
df=df.filter(filter_host('host')).select(['app_id','host','session_id'])
hosts=df.select(['host']).distinct().rdd.map(lambda r : r['host']).collect()
hosts.sort()
df1=df.groupBy('app_id','session_id') \
.pivot('host', hosts) \
.agg(F.count('host')).fillna(0)
df2=df1.toPandas()
df2.to_csv("tf22.csv")
|
flexible
|
{
"blob_id": "e7d63c3b56459297eb67c56e93a3c640d93e5f6d",
"index": 8683,
"step-1": "<mask token>\n\n\n@udf(returnType=BooleanType())\ndef filter_host(item):\n for i in filter_hosts:\n if item.find(i) != -1:\n return False\n return True\n\n\n<mask token>\n\n\n@udf(returnType=BooleanType())\ndef contains_host(item):\n for i in contains_hosts:\n if item.find(i) != -1:\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\nmyconf.setAppName('test').setMaster('local[40]')\nmyconf.set('spark.executor.instances', '40')\nmyconf.set('spark.driver.memory', '6G')\nmyconf.set('spark.executor.cores', '40')\nmyconf.set('spark.task.cpus', '40')\nmyconf.set('spark.jars.packages',\n 'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')\n<mask token>\nlogger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)\n<mask token>\n\n\n@udf(returnType=BooleanType())\ndef filter_host(item):\n for i in filter_hosts:\n if item.find(i) != -1:\n return False\n return True\n\n\n<mask token>\n\n\n@udf(returnType=BooleanType())\ndef contains_host(item):\n for i in contains_hosts:\n if item.find(i) != -1:\n return True\n return False\n\n\n<mask token>\nhosts.sort()\n<mask token>\ndf2.to_csv('tf22.csv')\n",
"step-3": "<mask token>\nprice_pattern = re.compile('^\\\\d+\\\\.\\\\d\\\\d$')\nmyconf = SparkConf()\nmyconf.setAppName('test').setMaster('local[40]')\nmyconf.set('spark.executor.instances', '40')\nmyconf.set('spark.driver.memory', '6G')\nmyconf.set('spark.executor.cores', '40')\nmyconf.set('spark.task.cpus', '40')\nmyconf.set('spark.jars.packages',\n 'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')\nspark = SparkSession.builder.config(conf=myconf).getOrCreate()\nlogger = spark._jvm.org.apache.log4j\nlogger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)\nfilter_hosts = ['vivo', 'google.com', 'google.cn', 'oppomobile',\n 'baidu.com', 'hicloud']\n\n\n@udf(returnType=BooleanType())\ndef filter_host(item):\n for i in filter_hosts:\n if item.find(i) != -1:\n return False\n return True\n\n\ncontains_hosts = ['jd.com']\n\n\n@udf(returnType=BooleanType())\ndef contains_host(item):\n for i in contains_hosts:\n if item.find(i) != -1:\n return True\n return False\n\n\ndf = spark.read.format('mongo').option('uri',\n 'mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420').option(\n 'spark.mongodb.input.partitioner', 'MongoSplitVectorPartitioner').load()\ndf = df.filter(filter_host('host')).select(['app_id', 'host', 'session_id'])\nhosts = df.select(['host']).distinct().rdd.map(lambda r: r['host']).collect()\nhosts.sort()\ndf1 = df.groupBy('app_id', 'session_id').pivot('host', hosts).agg(F.count(\n 'host')).fillna(0)\ndf2 = df1.toPandas()\ndf2.to_csv('tf22.csv')\n",
"step-4": "import tensorflow\nfrom pyspark.sql.functions import split\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.functions import udf, array\nimport re\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as F\nprice_pattern = re.compile('^\\\\d+\\\\.\\\\d\\\\d$')\nmyconf = SparkConf()\nmyconf.setAppName('test').setMaster('local[40]')\nmyconf.set('spark.executor.instances', '40')\nmyconf.set('spark.driver.memory', '6G')\nmyconf.set('spark.executor.cores', '40')\nmyconf.set('spark.task.cpus', '40')\nmyconf.set('spark.jars.packages',\n 'org.mongodb.spark:mongo-spark-connector_2.11:2.4.1')\nspark = SparkSession.builder.config(conf=myconf).getOrCreate()\nlogger = spark._jvm.org.apache.log4j\nlogger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)\nfilter_hosts = ['vivo', 'google.com', 'google.cn', 'oppomobile',\n 'baidu.com', 'hicloud']\n\n\n@udf(returnType=BooleanType())\ndef filter_host(item):\n for i in filter_hosts:\n if item.find(i) != -1:\n return False\n return True\n\n\ncontains_hosts = ['jd.com']\n\n\n@udf(returnType=BooleanType())\ndef contains_host(item):\n for i in contains_hosts:\n if item.find(i) != -1:\n return True\n return False\n\n\ndf = spark.read.format('mongo').option('uri',\n 'mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420').option(\n 'spark.mongodb.input.partitioner', 'MongoSplitVectorPartitioner').load()\ndf = df.filter(filter_host('host')).select(['app_id', 'host', 'session_id'])\nhosts = df.select(['host']).distinct().rdd.map(lambda r: r['host']).collect()\nhosts.sort()\ndf1 = df.groupBy('app_id', 'session_id').pivot('host', hosts).agg(F.count(\n 'host')).fillna(0)\ndf2 = df1.toPandas()\ndf2.to_csv('tf22.csv')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport tensorflow\nfrom pyspark.sql.functions import split\nfrom pyspark.ml.fpm import FPGrowth\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.functions import udf, array\nimport re\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as F\nprice_pattern = re.compile(r'^\\d+\\.\\d\\d$')\nmyconf = SparkConf()\nmyconf.setAppName(\"test\").setMaster(\"local[40]\")\nmyconf.set('spark.executor.instances','40')\nmyconf.set('spark.driver.memory','6G')\n#myconf.set('spark.executor.memory','1G')\nmyconf.set('spark.executor.cores','40')\nmyconf.set('spark.task.cpus','40')\n\n# 指定连接器对应的spark-package\nmyconf.set(\"spark.jars.packages\",\"org.mongodb.spark:mongo-spark-connector_2.11:2.4.1\")\nspark = SparkSession.builder.config(conf=myconf).getOrCreate()\nlogger = spark._jvm.org.apache.log4j\nlogger.LogManager.getRootLogger().setLevel(logger.Level.FATAL)\n\nfilter_hosts=[\"vivo\",\"google.com\",\"google.cn\",\"oppomobile\",\"baidu.com\",\"hicloud\"]\n@udf(returnType=BooleanType())\ndef filter_host(item):\n for i in filter_hosts:\n if item.find(i) != -1:\n return False\n return True\n\ncontains_hosts=[\"jd.com\"]\n@udf(returnType=BooleanType())\ndef contains_host(item):\n for i in contains_hosts:\n if item.find(i) != -1:\n return True\n return False\n\ndf=spark.read.format(\"mongo\").option(\"uri\",\"mongodb://192.168.0.13:27017/jicheng.autopkgcatpure20210420\").option(\"spark.mongodb.input.partitioner\",\"MongoSplitVectorPartitioner\").load()\ndf=df.filter(filter_host('host')).select(['app_id','host','session_id'])\n\nhosts=df.select(['host']).distinct().rdd.map(lambda r : r['host']).collect()\nhosts.sort()\n\ndf1=df.groupBy('app_id','session_id') \\\n .pivot('host', hosts) \\\n .agg(F.count('host')).fillna(0)\ndf2=df1.toPandas()\ndf2.to_csv(\"tf22.csv\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def reverse(text):
"""将字符串翻转"""
return text[::-1]
def is_palindrome(text):
print(e for e in text if e.isalnum())
m = ''.join(e for e in text if e.isalnum())
print(m)
"""是否是回文数"""
return m == reverse(m)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reverse(text):
"""将字符串翻转"""
return text[::-1]
def is_palindrome(text):
print(e for e in text if e.isalnum())
m = ''.join(e for e in text if e.isalnum())
print(m)
"""是否是回文数"""
return m == reverse(m)
<|reserved_special_token_0|>
if is_palindrome(something):
print('是,这是个回文数')
else:
print('不,这不是回文数')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reverse(text):
"""将字符串翻转"""
return text[::-1]
def is_palindrome(text):
print(e for e in text if e.isalnum())
m = ''.join(e for e in text if e.isalnum())
print(m)
"""是否是回文数"""
return m == reverse(m)
something = input('请输入:')
if is_palindrome(something):
print('是,这是个回文数')
else:
print('不,这不是回文数')
<|reserved_special_token_1|>
import string
def reverse(text):
"""将字符串翻转"""
return text[::-1]
def is_palindrome(text):
print(e for e in text if e.isalnum())
m = ''.join(e for e in text if e.isalnum())
print(m)
"""是否是回文数"""
return m == reverse(m)
something = input('请输入:')
if is_palindrome(something):
print('是,这是个回文数')
else:
print('不,这不是回文数')
<|reserved_special_token_1|>
# !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: sx
import string
def reverse(text):
"""将字符串翻转"""
return text[::-1]
def is_palindrome(text):
print(e for e in text if e.isalnum())
# 去掉标点空格
m = ''.join(e for e in text if e.isalnum())
print(m)
"""是否是回文数"""
return m == reverse(m)
something = input('请输入:')
if is_palindrome(something):
print('是,这是个回文数')
else:
print('不,这不是回文数')
|
flexible
|
{
"blob_id": "03a1f9f533f7550db32fa25578ef2f7f4c741510",
"index": 8583,
"step-1": "<mask token>\n\n\ndef reverse(text):\n \"\"\"将字符串翻转\"\"\"\n return text[::-1]\n\n\ndef is_palindrome(text):\n print(e for e in text if e.isalnum())\n m = ''.join(e for e in text if e.isalnum())\n print(m)\n \"\"\"是否是回文数\"\"\"\n return m == reverse(m)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef reverse(text):\n \"\"\"将字符串翻转\"\"\"\n return text[::-1]\n\n\ndef is_palindrome(text):\n print(e for e in text if e.isalnum())\n m = ''.join(e for e in text if e.isalnum())\n print(m)\n \"\"\"是否是回文数\"\"\"\n return m == reverse(m)\n\n\n<mask token>\nif is_palindrome(something):\n print('是,这是个回文数')\nelse:\n print('不,这不是回文数')\n",
"step-3": "<mask token>\n\n\ndef reverse(text):\n \"\"\"将字符串翻转\"\"\"\n return text[::-1]\n\n\ndef is_palindrome(text):\n print(e for e in text if e.isalnum())\n m = ''.join(e for e in text if e.isalnum())\n print(m)\n \"\"\"是否是回文数\"\"\"\n return m == reverse(m)\n\n\nsomething = input('请输入:')\nif is_palindrome(something):\n print('是,这是个回文数')\nelse:\n print('不,这不是回文数')\n",
"step-4": "import string\n\n\ndef reverse(text):\n \"\"\"将字符串翻转\"\"\"\n return text[::-1]\n\n\ndef is_palindrome(text):\n print(e for e in text if e.isalnum())\n m = ''.join(e for e in text if e.isalnum())\n print(m)\n \"\"\"是否是回文数\"\"\"\n return m == reverse(m)\n\n\nsomething = input('请输入:')\nif is_palindrome(something):\n print('是,这是个回文数')\nelse:\n print('不,这不是回文数')\n",
"step-5": "# !/usr/bin/env python\n# -*- coding:utf-8 -*- \n# Author: sx\nimport string\n\n\ndef reverse(text):\n \"\"\"将字符串翻转\"\"\"\n return text[::-1]\n\n\ndef is_palindrome(text):\n \n print(e for e in text if e.isalnum())\n # 去掉标点空格\n m = ''.join(e for e in text if e.isalnum())\n print(m)\n \"\"\"是否是回文数\"\"\"\n return m == reverse(m)\n\n\nsomething = input('请输入:')\nif is_palindrome(something):\n print('是,这是个回文数')\nelse:\n print('不,这不是回文数')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32
) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
<|reserved_special_token_0|>
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split('__')
scaling_method, scaling_args = scaling_string_parsed[0
], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def uniform_scaling(*args, **kwargs):
return 1.0
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32
) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
<|reserved_special_token_0|>
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split('__')
scaling_method, scaling_args = scaling_string_parsed[0
], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def uniform_scaling(*args, **kwargs):
return 1.0
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32
) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
def ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):
"""
Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. Then,
the rest of the state is the pos/vel of everything.
"""
state_dim = len(env.observation_space.low)
action_dim = len(env.action_space.low)
num_com_features = 2
num_other_features = state_dim - num_com_features
com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(
com_scaling)
other_feature_scaler = np.ones((num_other_features,), dtype=np.float32
) * float(other_feature_scaling)
action_scaler = np.ones((action_dim,), dtype=np.float32) * float(
action_scaling)
return np.concatenate([com_scaler, other_feature_scaler, action_scaler],
axis=0)
print(
'Just a note that you should PROBABLY be normalizing one way or another for this one.'
)
<|reserved_special_token_0|>
_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':
per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split('__')
scaling_method, scaling_args = scaling_string_parsed[0
], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
def uniform_scaling(*args, **kwargs):
return 1.0
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32
) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print('Using dm_control so need to get state_dim differently')
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
def ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):
"""
Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. Then,
the rest of the state is the pos/vel of everything.
"""
state_dim = len(env.observation_space.low)
action_dim = len(env.action_space.low)
num_com_features = 2
num_other_features = state_dim - num_com_features
com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(
com_scaling)
other_feature_scaler = np.ones((num_other_features,), dtype=np.float32
) * float(other_feature_scaling)
action_scaler = np.ones((action_dim,), dtype=np.float32) * float(
action_scaling)
return np.concatenate([com_scaler, other_feature_scaler, action_scaler],
axis=0)
print(
'Just a note that you should PROBABLY be normalizing one way or another for this one.'
)
<|reserved_special_token_0|>
_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':
per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split('__')
scaling_method, scaling_args = scaling_string_parsed[0
], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
<|reserved_special_token_1|>
"""
It's annoying that we have to do it here but for something like Ant, we're not going to be able to
specify it easily inside of the rbf_hyper_parameters file. Because, for something like Ant, we have
2 COM dimensions, and Bipedal we have 1.
So, we're going to do something similar to shaping_functions.
The way it'll work is, to make it modular, we'll take in a single string that we then separate out to
get the scaling. I like that. So, it'll be something like, for Ant:
{
uniform: func_get_ones()
special_loc_scaling: func_special_loc(com, rest_state, actions)
}
There's an argument for making these things know about the environment already. Only because we need
the state and action dimensions. So maybe you pass the environment into the constructor?
It's sort of annoying -- do we do the automatic scaling or not? I'd say leave the option, for something like Ant,
it's unavoidable to use it, even though it does make the problem non-stationary.
And it figures out the rest from there.
So, in the end this will just return an array.
"""
import numpy as np
def uniform_scaling(*args, **kwargs):
return 1.
def action_scaling(env, action_scaler):
"""
This is actually going to just be "action scaling". Because,
it's all about the ratio, and the ratio doesn't change!
"""
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print("Using dm_control so need to get state_dim differently")
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
# state_scaling = float(state_scaling)
action_scaler = float(action_scaler)
state_scaler_array = np.ones((state_dim,), dtype=np.float32)
action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler
return np.concatenate([state_scaler_array, action_scaler_array], axis=0)
def per_dim_scaling(env, *args):
try:
state_dim = len(env.observation_space.low)
except AttributeError:
print("Using dm_control so need to get state_dim differently")
state_dim = len(env.observation_space['observations'].low)
action_dim = len(env.action_space.low)
assert len(args) == state_dim + action_dim
return np.array(args, dtype=np.float32)
def ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):
"""
Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. Then,
the rest of the state is the pos/vel of everything.
"""
state_dim = len(env.observation_space.low)
action_dim = len(env.action_space.low)
num_com_features = 2
num_other_features = state_dim - num_com_features
com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(com_scaling)
other_feature_scaler = np.ones((num_other_features,), dtype=np.float32) * float(other_feature_scaling)
action_scaler = np.ones((action_dim,), dtype=np.float32) * float(action_scaling)
return np.concatenate([com_scaler, other_feature_scaler, action_scaler], axis=0)
# assert
print("Just a note that you should PROBABLY be normalizing one way or another for this one.")
"""
This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.
That lets us pass stuff in through a CLI interface a bit easier.
"""
_SCALING_FUNCTIONS = {
'action_scaling': action_scaling,
'per_dim_scaling': per_dim_scaling,
'ant_maze_scaling': ant_maze_scaling,
}
def get_scaling_array(env, scaling_function_string):
scaling_string_parsed = scaling_function_string.split("__")
scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]
scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
return scaling_array
# class ScalingFunctions:
# """
# This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.
# That lets us pass stuff in through a CLI interface a bit easier.
# """
# SCALING_FUNCTIONS = {
# 'state_action_scaling': state_action_scaling,
# 'per_dim_scaling': per_dim_scaling
# }
# def __init__(self, env, scaling_string):
# scaling_string_parsed = scaling_string.split("__")
# scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]
# scaling_array = self.SCALING_FUNCTIONS[scaling_method](env, *scaling_args)
# # return scaling_array
|
flexible
|
{
"blob_id": "5529813e10e4a30a60c28242be9d1a8822fb58af",
"index": 9685,
"step-1": "<mask token>\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\n<mask token>\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-2": "<mask token>\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\n<mask token>\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-3": "<mask token>\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(\n com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32\n ) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(\n action_scaling)\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler],\n axis=0)\n print(\n 'Just a note that you should PROBABLY be normalizing one way or another for this one.'\n )\n\n\n<mask token>\n_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':\n per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-4": "<mask token>\nimport numpy as np\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.0\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n action_scaler = float(action_scaler)\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32\n ) * action_scaler\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print('Using dm_control so need to get state_dim differently')\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(\n com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32\n ) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(\n action_scaling)\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler],\n axis=0)\n print(\n 'Just a note that you should PROBABLY be normalizing one way or another for this one.'\n )\n\n\n<mask token>\n_SCALING_FUNCTIONS = {'action_scaling': action_scaling, 'per_dim_scaling':\n per_dim_scaling, 'ant_maze_scaling': ant_maze_scaling}\n\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split('__')\n scaling_method, scaling_args = scaling_string_parsed[0\n ], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n",
"step-5": "\"\"\"\nIt's annoying that we have to do it here but for something like Ant, we're not going to be able to\nspecify it easily inside of the rbf_hyper_parameters file. Because, for something like Ant, we have\n2 COM dimensions, and Bipedal we have 1. \n\nSo, we're going to do something similar to shaping_functions.\n\nThe way it'll work is, to make it modular, we'll take in a single string that we then separate out to\nget the scaling. I like that. So, it'll be something like, for Ant:\n {\n uniform: func_get_ones()\n special_loc_scaling: func_special_loc(com, rest_state, actions)\n }\n\nThere's an argument for making these things know about the environment already. Only because we need\nthe state and action dimensions. So maybe you pass the environment into the constructor?\n\nIt's sort of annoying -- do we do the automatic scaling or not? I'd say leave the option, for something like Ant,\nit's unavoidable to use it, even though it does make the problem non-stationary.\n\nAnd it figures out the rest from there.\n\nSo, in the end this will just return an array. \n\"\"\"\n\nimport numpy as np\n\n\ndef uniform_scaling(*args, **kwargs):\n return 1.\n\n\ndef action_scaling(env, action_scaler):\n \"\"\"\n This is actually going to just be \"action scaling\". 
Because,\n it's all about the ratio, and the ratio doesn't change!\n \"\"\"\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n\n action_dim = len(env.action_space.low)\n\n # state_scaling = float(state_scaling)\n action_scaler = float(action_scaler)\n\n state_scaler_array = np.ones((state_dim,), dtype=np.float32)\n action_scaler_array = np.ones((action_dim,), dtype=np.float32) * action_scaler\n\n return np.concatenate([state_scaler_array, action_scaler_array], axis=0)\n\ndef per_dim_scaling(env, *args):\n try:\n state_dim = len(env.observation_space.low)\n except AttributeError:\n print(\"Using dm_control so need to get state_dim differently\")\n state_dim = len(env.observation_space['observations'].low)\n action_dim = len(env.action_space.low)\n assert len(args) == state_dim + action_dim\n return np.array(args, dtype=np.float32)\n\ndef ant_maze_scaling(env, com_scaling, other_feature_scaling, action_scaling):\n \"\"\"\n Not sure how this is correct, but: I'm assuming that the COM is the first 2 states. 
Then,\n the rest of the state is the pos/vel of everything.\n \"\"\"\n state_dim = len(env.observation_space.low)\n action_dim = len(env.action_space.low)\n\n num_com_features = 2\n num_other_features = state_dim - num_com_features\n\n com_scaler = np.ones((num_com_features,), dtype=np.float32) * float(com_scaling)\n other_feature_scaler = np.ones((num_other_features,), dtype=np.float32) * float(other_feature_scaling)\n action_scaler = np.ones((action_dim,), dtype=np.float32) * float(action_scaling)\n\n return np.concatenate([com_scaler, other_feature_scaler, action_scaler], axis=0)\n\n # assert \n\n print(\"Just a note that you should PROBABLY be normalizing one way or another for this one.\")\n\n\n\n\"\"\"\nThis has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.\nThat lets us pass stuff in through a CLI interface a bit easier.\n\"\"\"\n_SCALING_FUNCTIONS = {\n 'action_scaling': action_scaling,\n 'per_dim_scaling': per_dim_scaling,\n 'ant_maze_scaling': ant_maze_scaling,\n}\n\ndef get_scaling_array(env, scaling_function_string):\n scaling_string_parsed = scaling_function_string.split(\"__\")\n scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]\n scaling_array = _SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n return scaling_array\n\n\n\n# class ScalingFunctions:\n# \"\"\"\n# This has an interesting interface -- scaling_string is a string where the arguments are double-underscore-separated.\n# That lets us pass stuff in through a CLI interface a bit easier.\n# \"\"\"\n# SCALING_FUNCTIONS = {\n# 'state_action_scaling': state_action_scaling,\n# 'per_dim_scaling': per_dim_scaling \n# }\n\n# def __init__(self, env, scaling_string):\n# scaling_string_parsed = scaling_string.split(\"__\")\n# scaling_method, scaling_args = scaling_string_parsed[0], scaling_string_parsed[1:]\n# scaling_array = self.SCALING_FUNCTIONS[scaling_method](env, *scaling_args)\n# # return 
scaling_array\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from selenium import webdriver
import time
with webdriver.Chrome() as browser:
browser.get("http://suninjuly.github.io/selects1.html")
time.sleep(1)
x = int(browser.find_element_by_id("num1").text)
y = int(browser.find_element_by_id("num2").text)
sum_xy = str(int(x)+int(y))
browser.find_element_by_tag_name("select").click()
sum_opt = browser.find_element_by_css_selector("[value='{}']".format(sum_xy))
sum_opt.click()
browser.find_element_by_tag_name("button").click()
time.sleep(5)
|
normal
|
{
"blob_id": "42be9077ec51a9be1d4923011a38cd64d829f876",
"index": 1529,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith webdriver.Chrome() as browser:\n browser.get('http://suninjuly.github.io/selects1.html')\n time.sleep(1)\n x = int(browser.find_element_by_id('num1').text)\n y = int(browser.find_element_by_id('num2').text)\n sum_xy = str(int(x) + int(y))\n browser.find_element_by_tag_name('select').click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(\n sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name('button').click()\n time.sleep(5)\n",
"step-3": "from selenium import webdriver\nimport time\nwith webdriver.Chrome() as browser:\n browser.get('http://suninjuly.github.io/selects1.html')\n time.sleep(1)\n x = int(browser.find_element_by_id('num1').text)\n y = int(browser.find_element_by_id('num2').text)\n sum_xy = str(int(x) + int(y))\n browser.find_element_by_tag_name('select').click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(\n sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name('button').click()\n time.sleep(5)\n",
"step-4": "from selenium import webdriver\nimport time\n\nwith webdriver.Chrome() as browser:\n browser.get(\"http://suninjuly.github.io/selects1.html\")\n time.sleep(1)\n x = int(browser.find_element_by_id(\"num1\").text)\n y = int(browser.find_element_by_id(\"num2\").text)\n sum_xy = str(int(x)+int(y))\n browser.find_element_by_tag_name(\"select\").click()\n sum_opt = browser.find_element_by_css_selector(\"[value='{}']\".format(sum_xy))\n sum_opt.click()\n browser.find_element_by_tag_name(\"button\").click()\n time.sleep(5)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
<|reserved_special_token_0|>
maths(int(value))
<|reserved_special_token_1|>
def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
value = input('enter the value ')
maths(int(value))
<|reserved_special_token_1|>
def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print("bizzfizz")
elif num % 3 == 0:
print("fizz")
elif num % 5 == 0:
print("bizz")
else:
print(num)
value=input("enter the value ")
maths(int(value))
|
flexible
|
{
"blob_id": "91f83adbe01e2d8070f9286031b77eae71beb83e",
"index": 1107,
"step-1": "<mask token>\n",
"step-2": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\n<mask token>\n",
"step-3": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\n<mask token>\nmaths(int(value))\n",
"step-4": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print('bizzfizz')\n elif num % 3 == 0:\n print('fizz')\n elif num % 5 == 0:\n print('bizz')\n else:\n print(num)\n\n\nvalue = input('enter the value ')\nmaths(int(value))\n",
"step-5": "def maths(num):\n int(num)\n if num % 5 == 0 and num % 3 == 0:\n print(\"bizzfizz\")\n elif num % 3 == 0:\n print(\"fizz\")\n elif num % 5 == 0:\n print(\"bizz\")\n else:\n print(num)\n\n\nvalue=input(\"enter the value \")\nmaths(int(value))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# encoding: utf-8
import paramiko
import select
import os
import sys
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
host = "47.107.229.100"
user = "root"
pwd = "aliyun1996874353...A"
class SSH:
def __init__(self, host, user, pwd, port=22):
self.host = host
self.user = user
self.pwd = pwd
self.port = port
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(host, username=user, password=pwd, port=port)
def exec_cmd(self, cmd):
stdin, stdout, stderr = self.client.exec_command(cmd)
res, err = stdout.read(), stderr.read()
result = res if res else err ##这里我们使用三元运算
print("##" + result.decode(encoding="utf-8").replace('\n', '', -1) + "##")
def put_file(self, local_file, service_file):
tran = paramiko.Transport(self.host, self.port)
tran.connect(username=self.user, password=self.pwd)
sftp = paramiko.SFTPClient.from_transport(tran)
sftp.put(local_file, service_file)
tran.close()
def get_file(self, service_file, local_file):
self.client.get_transport()
sftp = paramiko.SFTPClient.from_transport(self.client)
sftp.get(service_file, local_file)
def c_connect(self):
channel = self.client.open_session()
def close_ssh(self):
self.client.close()
def test():
import paramiko
import os
import select
import sys
# 建立一个socket
trans = paramiko.Transport((host, 22))
trans.start_client()
# 如果使用rsa密钥登录的话
'''
default_key_file = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
prikey = paramiko.RSAKey.from_private_key_file(default_key_file)
trans.auth_publickey(username='super', key=prikey)
'''
# 如果使用用户名和密码登录
trans.auth_password(username=user, password=pwd)
# 打开一个通道
channel = trans.open_session()
# 获取终端
channel.get_pty()
# 激活终端,这样就可以登录到终端了,就和我们用类似于xshell登录系统一样
channel.invoke_shell()
# 下面就可以执行你所有的操作,用select实现
# 对输入终端sys.stdin和 通道进行监控,
# 当用户在终端输入命令后,将命令交给channel通道,这个时候sys.stdin就发生变化,select就可以感知
# channel的发送命令、获取结果过程其实就是一个socket的发送和接受信息的过程
while True:
readlist, writelist, errlist = select.select([channel, sys.stdin, ], [], []) # 如果是用户输入命令了,sys.stdin发生变化
if sys.stdin in readlist: # 获取输入的内容
input_cmd = sys.stdin.read(1) # 将命令发送给服务器
channel.sendall(input_cmd) # 服务器返回了结果,channel通道接受到结果,发生变化 select感知到
if channel in readlist: # 获取结果
result = channel.recv(1024) # 断开连接后退出
if len(result) == 0:
print("\r\n**** EOF **** \r\n")
break # 输出到屏幕
sys.stdout.write(result.decode())
sys.stdout.flush() # 关闭通道
channel.close() # 关闭链接
trans.close()
if __name__ == '__main__':
# put_file()
ssh = SSH(host, user, pwd)
# ssh.put_file("easyops.sh", "/tmp/easyops.sh")
# ssh.exec_cmd("ls /tmp")
# ssh.exec_cmd("sh /tmp/easyops.sh")
# ssh.c_connect()
test()
|
normal
|
{
"blob_id": "2342a651ec45623b887c4bc1168adb0731ba5ff6",
"index": 8443,
"step-1": "<mask token>\n\n\nclass SSH:\n <mask token>\n\n def exec_cmd(self, cmd):\n stdin, stdout, stderr = self.client.exec_command(cmd)\n res, err = stdout.read(), stderr.read()\n result = res if res else err\n print('##' + result.decode(encoding='utf-8').replace('\\n', '', -1) +\n '##')\n\n def put_file(self, local_file, service_file):\n tran = paramiko.Transport(self.host, self.port)\n tran.connect(username=self.user, password=self.pwd)\n sftp = paramiko.SFTPClient.from_transport(tran)\n sftp.put(local_file, service_file)\n tran.close()\n <mask token>\n\n def c_connect(self):\n channel = self.client.open_session()\n\n def close_ssh(self):\n self.client.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SSH:\n <mask token>\n\n def exec_cmd(self, cmd):\n stdin, stdout, stderr = self.client.exec_command(cmd)\n res, err = stdout.read(), stderr.read()\n result = res if res else err\n print('##' + result.decode(encoding='utf-8').replace('\\n', '', -1) +\n '##')\n\n def put_file(self, local_file, service_file):\n tran = paramiko.Transport(self.host, self.port)\n tran.connect(username=self.user, password=self.pwd)\n sftp = paramiko.SFTPClient.from_transport(tran)\n sftp.put(local_file, service_file)\n tran.close()\n\n def get_file(self, service_file, local_file):\n self.client.get_transport()\n sftp = paramiko.SFTPClient.from_transport(self.client)\n sftp.get(service_file, local_file)\n\n def c_connect(self):\n channel = self.client.open_session()\n\n def close_ssh(self):\n self.client.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SSH:\n\n def __init__(self, host, user, pwd, port=22):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.port = port\n self.client = paramiko.SSHClient()\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.client.connect(host, username=user, password=pwd, port=port)\n\n def exec_cmd(self, cmd):\n stdin, stdout, stderr = self.client.exec_command(cmd)\n res, err = stdout.read(), stderr.read()\n result = res if res else err\n print('##' + result.decode(encoding='utf-8').replace('\\n', '', -1) +\n '##')\n\n def put_file(self, local_file, service_file):\n tran = paramiko.Transport(self.host, self.port)\n tran.connect(username=self.user, password=self.pwd)\n sftp = paramiko.SFTPClient.from_transport(tran)\n sftp.put(local_file, service_file)\n tran.close()\n\n def get_file(self, service_file, local_file):\n self.client.get_transport()\n sftp = paramiko.SFTPClient.from_transport(self.client)\n sftp.get(service_file, local_file)\n\n def c_connect(self):\n channel = self.client.open_session()\n\n def close_ssh(self):\n self.client.close()\n\n\ndef test():\n import paramiko\n import os\n import select\n import sys\n trans = paramiko.Transport((host, 22))\n trans.start_client()\n \"\"\"\n\n default_key_file = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')\n\n prikey = paramiko.RSAKey.from_private_key_file(default_key_file)\n\n trans.auth_publickey(username='super', key=prikey)\n\n \"\"\"\n trans.auth_password(username=user, password=pwd)\n channel = trans.open_session()\n channel.get_pty()\n channel.invoke_shell()\n while True:\n readlist, writelist, errlist = select.select([channel, sys.stdin],\n [], [])\n if sys.stdin in readlist:\n input_cmd = sys.stdin.read(1)\n channel.sendall(input_cmd)\n if channel in readlist:\n result = channel.recv(1024)\n if len(result) == 0:\n print('\\r\\n**** EOF **** \\r\\n')\n break\n sys.stdout.write(result.decode())\n sys.stdout.flush()\n channel.close()\n 
trans.close()\n\n\n<mask token>\n",
"step-4": "import paramiko\nimport select\nimport os\nimport sys\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nhost = '47.107.229.100'\nuser = 'root'\npwd = 'aliyun1996874353...A'\n\n\nclass SSH:\n\n def __init__(self, host, user, pwd, port=22):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.port = port\n self.client = paramiko.SSHClient()\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.client.connect(host, username=user, password=pwd, port=port)\n\n def exec_cmd(self, cmd):\n stdin, stdout, stderr = self.client.exec_command(cmd)\n res, err = stdout.read(), stderr.read()\n result = res if res else err\n print('##' + result.decode(encoding='utf-8').replace('\\n', '', -1) +\n '##')\n\n def put_file(self, local_file, service_file):\n tran = paramiko.Transport(self.host, self.port)\n tran.connect(username=self.user, password=self.pwd)\n sftp = paramiko.SFTPClient.from_transport(tran)\n sftp.put(local_file, service_file)\n tran.close()\n\n def get_file(self, service_file, local_file):\n self.client.get_transport()\n sftp = paramiko.SFTPClient.from_transport(self.client)\n sftp.get(service_file, local_file)\n\n def c_connect(self):\n channel = self.client.open_session()\n\n def close_ssh(self):\n self.client.close()\n\n\ndef test():\n import paramiko\n import os\n import select\n import sys\n trans = paramiko.Transport((host, 22))\n trans.start_client()\n \"\"\"\n\n default_key_file = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')\n\n prikey = paramiko.RSAKey.from_private_key_file(default_key_file)\n\n trans.auth_publickey(username='super', key=prikey)\n\n \"\"\"\n trans.auth_password(username=user, password=pwd)\n channel = trans.open_session()\n channel.get_pty()\n channel.invoke_shell()\n while True:\n readlist, writelist, errlist = select.select([channel, sys.stdin],\n [], [])\n if sys.stdin in readlist:\n input_cmd = sys.stdin.read(1)\n channel.sendall(input_cmd)\n if channel in 
readlist:\n result = channel.recv(1024)\n if len(result) == 0:\n print('\\r\\n**** EOF **** \\r\\n')\n break\n sys.stdout.write(result.decode())\n sys.stdout.flush()\n channel.close()\n trans.close()\n\n\nif __name__ == '__main__':\n ssh = SSH(host, user, pwd)\n test()\n",
"step-5": "# encoding: utf-8\nimport paramiko\nimport select\nimport os\nimport sys\n\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nhost = \"47.107.229.100\"\nuser = \"root\"\npwd = \"aliyun1996874353...A\"\n\nclass SSH:\n def __init__(self, host, user, pwd, port=22):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.port = port\n self.client = paramiko.SSHClient()\n self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.client.connect(host, username=user, password=pwd, port=port)\n\n def exec_cmd(self, cmd):\n stdin, stdout, stderr = self.client.exec_command(cmd)\n res, err = stdout.read(), stderr.read()\n result = res if res else err ##这里我们使用三元运算\n print(\"##\" + result.decode(encoding=\"utf-8\").replace('\\n', '', -1) + \"##\")\n\n def put_file(self, local_file, service_file):\n tran = paramiko.Transport(self.host, self.port)\n tran.connect(username=self.user, password=self.pwd)\n sftp = paramiko.SFTPClient.from_transport(tran)\n sftp.put(local_file, service_file)\n tran.close()\n\n def get_file(self, service_file, local_file):\n self.client.get_transport()\n sftp = paramiko.SFTPClient.from_transport(self.client)\n sftp.get(service_file, local_file)\n\n\n def c_connect(self):\n channel = self.client.open_session()\n\n\n def close_ssh(self):\n self.client.close()\n\n\n\ndef test():\n import paramiko\n import os\n import select\n import sys\n # 建立一个socket\n trans = paramiko.Transport((host, 22))\n trans.start_client()\n # 如果使用rsa密钥登录的话\n '''\n\n default_key_file = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')\n\n prikey = paramiko.RSAKey.from_private_key_file(default_key_file)\n\n trans.auth_publickey(username='super', key=prikey)\n\n '''\n # 如果使用用户名和密码登录\n trans.auth_password(username=user, password=pwd)\n # 打开一个通道\n channel = trans.open_session()\n # 获取终端\n channel.get_pty()\n # 激活终端,这样就可以登录到终端了,就和我们用类似于xshell登录系统一样\n channel.invoke_shell()\n # 下面就可以执行你所有的操作,用select实现\n # 
对输入终端sys.stdin和 通道进行监控,\n # 当用户在终端输入命令后,将命令交给channel通道,这个时候sys.stdin就发生变化,select就可以感知\n # channel的发送命令、获取结果过程其实就是一个socket的发送和接受信息的过程\n\n while True:\n\n readlist, writelist, errlist = select.select([channel, sys.stdin, ], [], []) # 如果是用户输入命令了,sys.stdin发生变化\n if sys.stdin in readlist: # 获取输入的内容\n input_cmd = sys.stdin.read(1) # 将命令发送给服务器\n channel.sendall(input_cmd) # 服务器返回了结果,channel通道接受到结果,发生变化 select感知到\n if channel in readlist: # 获取结果\n result = channel.recv(1024) # 断开连接后退出\n if len(result) == 0:\n print(\"\\r\\n**** EOF **** \\r\\n\")\n break # 输出到屏幕\n sys.stdout.write(result.decode())\n sys.stdout.flush() # 关闭通道\n channel.close() # 关闭链接\n trans.close()\n\n\nif __name__ == '__main__':\n # put_file()\n ssh = SSH(host, user, pwd)\n # ssh.put_file(\"easyops.sh\", \"/tmp/easyops.sh\")\n # ssh.exec_cmd(\"ls /tmp\")\n # ssh.exec_cmd(\"sh /tmp/easyops.sh\")\n # ssh.c_connect()\n test()",
"step-ids": [
5,
6,
8,
11,
12
]
}
|
[
5,
6,
8,
11,
12
] |
import math
import numpy as np
import basis.robot_math as rm
import grasping.annotation.utils as gu
from scipy.spatial import cKDTree
def plan_contact_pairs(objcm,
                       max_samples=100,
                       min_dist_between_sampled_contact_points=.005,
                       angle_between_contact_normals=math.radians(160),
                       toggle_sampled_points=False):
    """
    Search for contact-point pairs on the surface of objcm via ray shooting:
    from every sampled surface point a ray is cast through the object along
    the inverse normal and each hit on the far side is tested as the second
    contact. The returned list may hold fewer than max_samples pairs because
    samples lying within min_dist_between_sampled_contact_points of an
    accepted opposite contact are pruned.
    :param objcm: collision model whose surface is sampled
    :param max_samples: number of surface samples to draw
    :param min_dist_between_sampled_contact_points: pruning radius around each
        opposite contact
    :param angle_between_contact_normals: angular threshold used when
        comparing the two contact normals
    :param toggle_sampled_points: if True, also return the raw sampled points
    :return: [[[contact_p0, contact_n0], [contact_p1, contact_n1]], ...]
        (plus the sampled points when toggle_sampled_points is True)
    author: weiwei
    date: 20190805, 20210504
    """
    sampled_pnts, sampled_fids = objcm.sample_surface(nsample=max_samples,
                                                      radius=min_dist_between_sampled_contact_points / 2)
    sampled_nrmls = objcm.objtrm.face_normals[sampled_fids]
    cos_threshold = math.cos(angle_between_contact_normals)  # hoisted loop invariant
    kd_tree = cKDTree(sampled_pnts)
    suppressed = np.array([0] * len(sampled_pnts), dtype=bool)
    pair_list = []
    for pid, pnt0 in enumerate(sampled_pnts):
        if suppressed[pid]:  # this sample was near an earlier opposite contact
            continue
        nrml0 = sampled_nrmls[pid]
        # shoot from just below the surface through the whole object
        hit_pnts, hit_nrmls = kd_hits = objcm.ray_hit(pnt0 - nrml0 * .001, pnt0 - nrml0 * 100)
        for pnt1, nrml1 in zip(hit_pnts, hit_nrmls):
            if np.dot(nrml0, nrml1) >= -cos_threshold:
                continue  # normals not sufficiently opposed
            # suppress samples clustered around this opposite contact whose
            # normals agree with it, so they are not revisited as pnt0 later
            for nid in kd_tree.query_ball_point(pnt1, min_dist_between_sampled_contact_points):
                if np.dot(sampled_nrmls[nid], nrml1) > cos_threshold:
                    suppressed[nid] = True
            pair_list.append([[pnt0, nrml0], [pnt1, nrml1]])
    if toggle_sampled_points:
        return pair_list, sampled_pnts
    return pair_list
def plan_grasps(hnd_s,
                objcm,
                angle_between_contact_normals=math.radians(160),
                openning_direction='loc_x',
                rotation_interval=math.radians(22.5),
                max_samples=100,
                min_dist_between_sampled_contact_points=.005,
                contact_offset=.002):
    """
    Plan grasps for gripper hnd_s on object objcm from antipodal contact pairs.
    :param hnd_s: gripper instance; must expose jawwidth_rng
    :param objcm: collision model of the target object
    :param angle_between_contact_normals: threshold forwarded to
        plan_contact_pairs
    :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
    :param rotation_interval: angular step for rotating the gripper around the
        contact axis (was wrongly documented as ``rotation_granularity``)
    :param max_samples: number of surface samples for contact-pair search
    :param min_dist_between_sampled_contact_points: pruning radius for samples
    :param contact_offset: offset at the contact to avoid the jaws being
        closely in touch with object surfaces
    :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
    :raises ValueError: if openning_direction is neither 'loc_x' nor 'loc_y'
    """
    contact_pairs = plan_contact_pairs(objcm,
                                       max_samples=max_samples,
                                       min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,
                                       angle_between_contact_normals=angle_between_contact_normals)
    grasp_info_list = []
    for i, cp in enumerate(contact_pairs):
        print(f"{i} of {len(contact_pairs)} done!")
        contact_p0, contact_n0 = cp[0]
        contact_p1, contact_n1 = cp[1]
        contact_center = (contact_p0 + contact_p1) / 2
        # the jaws must span both contacts plus the safety offset on each side
        jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
        if jaw_width > hnd_s.jawwidth_rng[1]:
            continue  # pair is wider than the gripper can open
        if openning_direction == 'loc_x':
            jaw_center_x = contact_n0
            jaw_center_z = rm.orthogonal_vector(contact_n0)
            jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
        elif openning_direction == 'loc_y':
            jaw_center_y = contact_n0
            jaw_center_z = rm.orthogonal_vector(contact_n0)
        else:
            raise ValueError("Openning direction must be loc_x or loc_y!")
        # rotate the gripper around the contact normal, keeping both flipped
        # and unflipped poses
        grasp_info_list += gu.define_grasp_with_rotation(hnd_s,
                                                         objcm,
                                                         gl_jaw_center_pos=contact_center,
                                                         gl_jaw_center_z=jaw_center_z,
                                                         gl_jaw_center_y=jaw_center_y,
                                                         jaw_width=jaw_width,
                                                         gl_rotation_ax=contact_n0,
                                                         rotation_interval=rotation_interval,
                                                         toggle_flip=True)
    return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):
    """
    Persist grasp_info_list for objcm_name by delegating to gu.write_pickle_file.
    :param objcm_name: key under which the grasps are stored
    :param grasp_info_list: list of grasp info entries to save
    :param root: destination directory; the current directory when None
    :param file_name: pickle file name inside root
    :param append: if True, append to an existing annotation file
    """
    target_root = './' if root is None else root
    gu.write_pickle_file(objcm_name, grasp_info_list,
                         root=target_root, file_name=file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):
    """
    Load previously annotated grasps for objcm_name via gu.load_pickle_file.
    :param objcm_name: key under which the grasps were stored
    :param root: directory holding the pickle; the current directory when None
    :param file_name: pickle file name inside root
    :return: the stored grasp info list
    """
    source_root = './' if root is None else root
    return gu.load_pickle_file(objcm_name, root=source_root, file_name=file_name)
if __name__ == '__main__':
    # Demo: plan grasps for an xArm gripper on a block mesh and render every
    # resulting gripper pose in the viewer.
    import os
    import basis
    import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag
    import modeling.collision_model as cm
    import visualization.panda.world as wd

    base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
    gripper_s = xag.XArmGripper(enable_cc=True)
    objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
    objcm = cm.CollisionModel(objpath)
    objcm.attach_to(base)
    objcm.show_localframe()
    grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)
    for grasp_info in grasp_info_list:
        jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
        gripper_copy = gripper_s.copy()
        gripper_copy.fix_to(hnd_pos, hnd_rotmat)
        gripper_copy.jaw_to(jaw_width)
        print(hnd_pos, hnd_rotmat)
        gripper_copy.gen_meshmodel().attach_to(base)
    base.run()
|
normal
|
{
"blob_id": "738e6d4d608aa977094420a432cbd8a05ea8a1b5",
"index": 4384,
"step-1": "<mask token>\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef 
write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param 
angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 
'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param 
angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 
'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-4": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, 
min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, 
file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-5": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm,\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n angle_between_contact_normals=math.radians(160),\n toggle_sampled_points=False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]: # if the point was previous near to some points, ignore\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * .001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1, min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s,\n objcm,\n angle_between_contact_normals=math.radians(160),\n openning_direction = 'loc_x',\n 
rotation_interval=math.radians(22.5),\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n contact_offset=.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm,\n max_samples=max_samples,\n min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f\"{i} of {len(contact_pairs)} done!\")\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError(\"Openning direction must be loc_x or loc_y!\")\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s,\n objcm,\n gl_jaw_center_pos=contact_center,\n gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y,\n jaw_width=jaw_width,\n gl_rotation_ax=contact_n0,\n rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n 
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n\n base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)\n for grasp_info in grasp_info_list:\n jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import mxnet as mx
import numpy as np
import cv2
import random
class Even_iterator(mx.io.DataIter):
    '''
    Legacy (Python 2) MXNet data iterator over an image .lst file.
    It shuffles the data while always keeping same-label pairs adjacent,
    as required by verification / triplet-loss training.
    '''
    def __init__(self, lst_name, batch_size, aug_params=dict(), shuffle=False):
        '''
        :param lst_name: path to a tab-separated .lst file, one image per row:
            "index<TAB>label(s)<TAB>image_path".
        :param batch_size: number of images per emitted batch.
        :param aug_params: augmentation settings; augment() and next() read
            'input_shape', 'resize', 'mean', 'rand_crop' and 'rand_mirror'.
        :param shuffle: if True, perform a pair-preserving shuffle on reset().
        '''
        super(Even_iterator, self).__init__()
        self.batch_size = batch_size
        # defensive copy so later caller-side mutation cannot affect us
        self.aug_params = aug_params.copy()
        self.shuffle = shuffle
        self.data, self.labels = Even_iterator.load_data(lst_name)
        print "load data over"
        self.data_num = self.labels.shape[0]
        # labels is 1-D for a single label column, 2-D for multi-label rows
        self.label_num = 1 if len(self.labels.shape) == 1 else self.labels.shape[1]
        print self.data_num, self.label_num
        self.reset()
    @staticmethod
    def load_data(lst_name):
        '''Read every image listed in lst_name into memory.

        Returns (data, labels): data is uint8 of shape (n, h, w, 3) — all
        images are assumed to share the size of the first one (TODO confirm);
        labels is int32 of shape (n,) or (n, m) for m label columns.
        '''
        img_lst = [x.strip().split('\t')
                   for x in file(lst_name).read().splitlines()]
        # first image fixes the (h, w) used to allocate the whole array
        im = cv2.imread(img_lst[0][-1])
        h, w = im.shape[:2]
        # m = number of label columns (row = index, labels..., path)
        n, m = len(img_lst), len(img_lst[0]) - 2
        data = np.zeros((n, h, w, 3), dtype=np.uint8)
        labels = np.zeros((n, m), dtype=np.int32) if m > 1 else np.zeros((n, ), dtype=np.int32)
        for i in range(len(img_lst)):
            im = cv2.imread(img_lst[i][-1])
            data[i] = im
            labels[i] = img_lst[i][1:-1] if m > 1 else img_lst[i][1]
        return data, labels
    @staticmethod
    def even_shuffle(labels):
        '''
        Return a shuffled index list in which consecutive index pairs come
        from the same label: samples are sorted by (label, random tiebreak)
        so equal labels are grouped, then the pair order is shuffled.
        NOTE(review): assumes len(labels) is even and each label occurs an
        even number of times, otherwise pairs straddle label boundaries.
        '''
        s = [(x, int(random.random() * 1e5), i) for i, x in enumerate(labels)]
        s = sorted(s, key=lambda x: (x[0], x[1]))
        lst = [x[2] for x in s]
        # idx walks pair starts; Python 2 range() returns a shuffleable list
        idx = range(0, len(lst), 2)
        random.shuffle(idx)
        ret = []
        for i in idx:
            ret.append(lst[i])
            ret.append(lst[i + 1])
        return ret
    @staticmethod
    def model_shuffle(labels):
        '''
        Return a shuffled index list where images sharing a label ("model")
        stay grouped: label ids are randomly permuted, then samples are
        sorted by (permuted label, random tiebreak).
        NOTE(review): assumes labels are dense non-negative ints usable as
        indices into models_idx — confirm against the .lst contents.
        '''
        models_idx = range(int(np.max(labels)) + 1)
        random.shuffle(models_idx)
        s = [(models_idx[x], int(random.random() * 1e5), i) for i, x in enumerate(labels)]
        s = sorted(s, key=lambda x: (x[0], x[1]))
        lst = [x[2] for x in s]
        return lst
    def reset(self):
        '''Rewind to the first batch; reshuffle (pair-preserving) if enabled.'''
        self.current = 0
        if self.shuffle:
            idx = Even_iterator.even_shuffle(self.labels)
            # idx = Even_iterator.model_shuffle(self.labels)
            self.data = self.data[idx]
            self.labels = self.labels[idx]
    @property
    def provide_data(self):
        '''MXNet data descriptor: one NCHW 'data' blob per batch.'''
        shape = self.aug_params['input_shape']
        return [('data', (self.batch_size, shape[0], shape[1], shape[2]))]
    @property
    def provide_label(self):
        '''MXNet label descriptor for 'softmax_label'.

        NOTE(review): always advertises (batch_size, label_num), but next()
        builds a 1-D (batch_size,) array when label_num == 1 — confirm MXNet
        tolerates this shape mismatch.
        '''
        return [('softmax_label', (self.batch_size, self.label_num))]
    @staticmethod
    def augment(im, aug_params):
        '''
        Augment one HWC image: resize the short side to aug_params['resize'],
        crop to input_shape (random or center), convert BGR->RGB, transpose
        to CHW, and optionally mirror horizontally.
        '''
        crop_h, crop_w = aug_params['input_shape'][1:]
        ori_h, ori_w = im.shape[:2]
        resize = aug_params['resize']
        # scale so the shorter side equals `resize`, keeping aspect ratio
        if ori_h < ori_w:
            h, w = resize, int(float(resize) / ori_h * ori_w)
        else:
            h, w = int(float(resize) / ori_w * ori_h), resize
        if h != ori_h:
            im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
        # center-crop offsets (Python 2 `/` is integer division here)
        x, y = (w - crop_w) / 2, (h - crop_h) / 2
        if aug_params['rand_crop']:
            x = random.randint(0, w - crop_w)
            y = random.randint(0, h - crop_h)
        im = im[y:y + crop_h, x:x + crop_w, :]
        # cv2.imshow("name", im.astype(np.uint8))
        # cv2.waitKey()
        # HWC -> CHW, then swap channels 0 and 2 (cv2 loads BGR; nets expect RGB)
        im = np.transpose(im, (2, 0, 1))
        newim = np.zeros_like(im)
        newim[0] = im[2]
        newim[1] = im[1]
        newim[2] = im[0]
        if aug_params['rand_mirror'] and random.randint(0, 1) == 1:
            newim = newim[:, :, ::-1]  # horizontal flip (reverse width axis)
        return newim
    def next(self):
        '''Return the next mx.io.DataBatch; raise StopIteration when the
        remaining samples cannot fill a full batch (tail is dropped).'''
        if self.current + self.batch_size > self.data_num:
            raise StopIteration
        shape = self.aug_params['input_shape']
        x = np.zeros((self.batch_size, shape[0], shape[1], shape[2]))
        y = np.zeros((self.batch_size, self.label_num) if self.label_num > 1
                     else (self.batch_size, ))
        index = []
        for i in range(self.current, self.current + self.batch_size):
            im = self.data[i]
            # NOTE(review): astype() returns a new array; this result is
            # discarded, so the line below is a no-op
            im.astype(np.float32)
            im = Even_iterator.augment(im, self.aug_params)
            x[i - self.current] = im
            y[i - self.current] = self.labels[i]
            index.append(i)
        x -= self.aug_params['mean']  # mean subtraction after augmentation
        x = mx.nd.array(x)
        label = mx.nd.array(y)
        batch = mx.io.DataBatch(data=[x], label=[label], pad=0, index=index)
        self.current += self.batch_size
        return batch
|
normal
|
{
"blob_id": "a824bd7577134227f5c136f2a4382c056f1175be",
"index": 9663,
"step-1": "import mxnet as mx\nimport numpy as np\nimport cv2\nimport random\n\n\nclass Even_iterator(mx.io.DataIter):\n '''\n data iterator, shuffle data but always make pairs as neighbors\n for verification and triplet loss\n '''\n def __init__(self, lst_name, batch_size, aug_params=dict(), shuffle=False):\n super(Even_iterator, self).__init__()\n self.batch_size = batch_size\n self.aug_params = aug_params.copy()\n self.shuffle = shuffle\n\n self.data, self.labels = Even_iterator.load_data(lst_name)\n print \"load data over\"\n self.data_num = self.labels.shape[0]\n self.label_num = 1 if len(self.labels.shape) == 1 else self.labels.shape[1]\n print self.data_num, self.label_num\n self.reset()\n\n @staticmethod\n def load_data(lst_name):\n img_lst = [x.strip().split('\\t')\n for x in file(lst_name).read().splitlines()]\n im = cv2.imread(img_lst[0][-1])\n h, w = im.shape[:2]\n n, m = len(img_lst), len(img_lst[0]) - 2\n data = np.zeros((n, h, w, 3), dtype=np.uint8)\n labels = np.zeros((n, m), dtype=np.int32) if m > 1 else np.zeros((n, ), dtype=np.int32)\n\n for i in range(len(img_lst)):\n im = cv2.imread(img_lst[i][-1])\n\n data[i] = im\n labels[i] = img_lst[i][1:-1] if m > 1 else img_lst[i][1]\n\n return data, labels\n\n @staticmethod\n def even_shuffle(labels):\n '''\n shuffle images lists and make pairs\n '''\n s = [(x, int(random.random() * 1e5), i) for i, x in enumerate(labels)]\n s = sorted(s, key=lambda x: (x[0], x[1]))\n lst = [x[2] for x in s]\n\n idx = range(0, len(lst), 2)\n random.shuffle(idx)\n ret = []\n for i in idx:\n ret.append(lst[i])\n ret.append(lst[i + 1])\n\n return ret\n\n @staticmethod\n def model_shuffle(labels):\n '''\n shuffle images and images with same model are grouped together\n '''\n models_idx = range(int(np.max(labels)) + 1)\n random.shuffle(models_idx)\n s = [(models_idx[x], int(random.random() * 1e5), i) for i, x in enumerate(labels)]\n s = sorted(s, key=lambda x: (x[0], x[1]))\n lst = [x[2] for x in s]\n\n return lst\n\n def 
reset(self):\n self.current = 0\n if self.shuffle:\n idx = Even_iterator.even_shuffle(self.labels)\n # idx = Even_iterator.model_shuffle(self.labels)\n self.data = self.data[idx]\n self.labels = self.labels[idx]\n\n @property\n def provide_data(self):\n shape = self.aug_params['input_shape']\n\n return [('data', (self.batch_size, shape[0], shape[1], shape[2]))]\n\n @property\n def provide_label(self):\n return [('softmax_label', (self.batch_size, self.label_num))]\n\n @staticmethod\n def augment(im, aug_params):\n '''\n augmentation (resize, crop, mirror)\n '''\n crop_h, crop_w = aug_params['input_shape'][1:]\n ori_h, ori_w = im.shape[:2]\n resize = aug_params['resize']\n if ori_h < ori_w:\n h, w = resize, int(float(resize) / ori_h * ori_w)\n else:\n h, w = int(float(resize) / ori_w * ori_h), resize\n\n if h != ori_h:\n im = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)\n\n x, y = (w - crop_w) / 2, (h - crop_h) / 2\n if aug_params['rand_crop']:\n x = random.randint(0, w - crop_w)\n y = random.randint(0, h - crop_h)\n im = im[y:y + crop_h, x:x + crop_w, :]\n\n # cv2.imshow(\"name\", im.astype(np.uint8))\n # cv2.waitKey()\n\n im = np.transpose(im, (2, 0, 1))\n newim = np.zeros_like(im)\n newim[0] = im[2]\n newim[1] = im[1]\n newim[2] = im[0]\n\n if aug_params['rand_mirror'] and random.randint(0, 1) == 1:\n newim = newim[:, :, ::-1]\n\n return newim\n\n def next(self):\n if self.current + self.batch_size > self.data_num:\n raise StopIteration\n\n shape = self.aug_params['input_shape']\n x = np.zeros((self.batch_size, shape[0], shape[1], shape[2]))\n y = np.zeros((self.batch_size, self.label_num) if self.label_num > 1\n else (self.batch_size, ))\n index = []\n for i in range(self.current, self.current + self.batch_size):\n im = self.data[i]\n im.astype(np.float32)\n im = Even_iterator.augment(im, self.aug_params)\n x[i - self.current] = im\n y[i - self.current] = self.labels[i]\n index.append(i)\n\n x -= self.aug_params['mean']\n\n x = mx.nd.array(x)\n label = 
mx.nd.array(y)\n\n batch = mx.io.DataBatch(data=[x], label=[label], pad=0, index=index)\n self.current += self.batch_size\n\n return batch\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Keyword/alias tables for matching free-text user answers in a history
# quiz.  Matching appears to be by membership, so each list enumerates
# the accepted spellings of one answer -- TODO confirm against callers.

# generic yes / no replies
Ylist = ['yes', 'Yes', 'Y', 'y']
Nlist = ['no', 'No', 'N', 'n']

# country aliases
America = ['America', 'america', 'amer', 'rica']

# The Revolutionary War (1775)
TRW = [
    '1775',
    'The Revolutionary war', 'the Revolutionary war',
    'the revolutionary war', 'The Revolutionary War',
    'trw', 'Trw', 'TRW',
]
# The Civil War (1861).  NOTE(review): 'The Civil war' and 'The civil War'
# each appear twice -- harmless for membership tests, but worth deduping.
TCW = [
    '1861',
    'The civil war', 'The civil War', 'The Civil war',
    'The Civil war', 'The civil War', 'The Civil War',
    'TCW', 'tcw', 'Tcw',
]
# World War One / The Great War (1917)
TGW = [
    '1917',
    'The Great War', 'the great war', 'the great War',
    'the Great war', 'The great war',
    'WW1', 'ww1', 'Ww1', 'wW1',
    'World War One', 'World war 1',
]
# World War Two (1941).  NOTE(review): 'World war two' and 'world war two'
# are duplicated.
WW2 = [
    '1941',
    'WW2', 'ww2', 'Ww2', 'W2',
    'World war two', 'World war two',
    'World War 2', 'World War Two',
    'world war two', 'world war two',
]

# Russia and the Russo-Japanese War (1904)
Russia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']
RJW = ['1904', 'TRJW', 'trjw']
|
normal
|
{
"blob_id": "6e07dcc3f3b8c7fbf8ce8d481b9612e7496967bd",
"index": 8316,
"step-1": "<mask token>\n",
"step-2": "Ylist = ['yes', 'Yes', 'Y', 'y']\nNlist = ['no', 'No', 'N', 'n']\nAmerica = ['America', 'america', 'amer', 'rica']\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war',\n 'the revolutionary war', 'The Revolutionary War', 'trw', 'Trw', 'TRW']\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war',\n 'The Civil war', 'The civil War', 'The Civil War', 'TCW', 'tcw', 'Tcw']\nTGW = ['1917', 'The Great War', 'the great war', 'the great War',\n 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1', 'wW1',\n 'World War One', 'World war 1']\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two',\n 'World War 2', 'World War Two', 'world war two', 'world war two']\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\nRJW = ['1904', 'TRJW', 'trjw']\n",
"step-3": "Ylist = ['yes', 'Yes', 'Y', 'y']\r\nNlist = ['no', 'No', 'N', 'n']\r\nAmerica = ['America', 'america', 'amer', 'rica']\r\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',\r\n 'trw', 'Trw', 'TRW']\r\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',\r\n 'TCW', 'tcw', 'Tcw']\r\nTGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',\r\n 'wW1', 'World War One', 'World war 1']\r\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',\r\n 'world war two', 'world war two']\r\n# Russia\r\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\r\nRJW = ['1904', 'TRJW', 'trjw']\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def patternCount(dnaText, pattern):
    """Count the (possibly overlapping) occurrences of pattern in dnaText.

    Fix: scan up to and including the final alignment position
    len(dnaText) - len(pattern); the original range stopped one short and
    missed any match ending at the last character.
    """
    count = 0
    for i in range(len(dnaText) - len(pattern) + 1):
        if dnaText[i:i + len(pattern)] == pattern:
            count += 1
    return count
def freqWordProblem(text, k):
    """Return all most-frequent k-mers of text (order unspecified).

    Fixes:
    - off-by-one: the original enumeration stopped at len(text) - k and
      never considered the k-mer ending at the last character;
    - performance: one O(n * k) counting pass with a dict instead of
      re-scanning the whole text for every position (O(n^2 * k)).
    """
    if k <= 0 or k > len(text):
        return []
    counts = {}
    for i in range(len(text) - k + 1):
        kmer = text[i:i + k]
        counts[kmer] = counts.get(kmer, 0) + 1
    maxCount = max(counts.values())
    return [kmer for kmer, c in counts.items() if c == maxCount]
# Watson-Crick base-pairing table used by complimentDNA: each nucleotide
# maps to its complementary base.
mapDNA = {
    "A": "T",  # adenine  <-> thymine
    "G": "C",  # guanine  <-> cytosine
    "T": "A",
    "C": "G",
}
def complimentDNA(text):
    """Return the reverse complement of the DNA string text.

    Fix: the original built the complement with quadratic string
    concatenation and then sliced to reverse; this joins complements of a
    reversed scan in one linear pass.  The mapping mirrors the module-level
    mapDNA table but is kept local so the function is self-contained.
    """
    pairs = {"A": "T", "T": "A", "G": "C", "C": "G"}
    return "".join(pairs[base] for base in reversed(text))
def patternFind(text, pattern):
index = []
for i in range(0,len(text)-len(pattern)):
word = text[i:i+len(pattern)]
if word == pattern:
index.append(i)
return index
for word in patternFind("AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGC
GCCACGCGCCACTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAG
CGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCAT
GGCGCCACCTGGGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACA
GAAAAGCGCCACAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC","GCGCCACGC"):
print(word)
|
normal
|
{
"blob_id": "29c1a989365408bf5c3d6196f7afc969be63df85",
"index": 5942,
"step-1": "<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\n<mask token>\n",
"step-2": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\n<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\n<mask token>\n",
"step-3": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\n<mask token>\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\nfor word in patternFind(\n 
'AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCACTGCGCCACCGCG
CCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCCACGCGCCACGCGC
CACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTGGGTCCATGCGCGC
CACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCACAGAGCGCCACAA
ATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC'\n , 'GCGCCACGC'):\n print(word)\n",
"step-4": "def patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i + len(pattern)]\n if word == pattern:\n count = count + 1\n return count\n\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i + k]\n countWords.append(patternCount(text, pattern))\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if count == maxCount:\n indexes.append(j)\n elif count > maxCount:\n indexes = [j]\n maxCount = count\n result = set()\n for index in indexes:\n result.add(text[index:index + k])\n return list(result)\n\n\nmapDNA = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}\n\n\ndef complimentDNA(text):\n result = ''\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0, len(text) - len(pattern)):\n word = text[i:i + len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\n\nfor word in patternFind(\n 
'AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCACTGCGCCACCGCG
CCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCCACGCGCCACGCGC
CACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTGGGTCCATGCGCGC
CACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCACAGAGCGCCACAA
ATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC'\n , 'GCGCCACGC'):\n print(word)\n",
"step-5": "\ndef patternCount(dnaText, pattern):\n count = 0\n for i in range(0, len(dnaText) - len(pattern)):\n word = dnaText[i:i+len(pattern)]\n if (word == pattern):\n count = count + 1\n return count\n\ndef freqWordProblem(text, k):\n countWords = []\n for i in range(0, len(text) - k):\n pattern = text[i:i+k]\n countWords.append(patternCount(text, pattern))\n\n maxCount = 0\n indexes = []\n for j in range(0, len(countWords)):\n count = countWords[j]\n if (count == maxCount):\n indexes.append(j)\n elif (count > maxCount):\n indexes = [j]\n maxCount = count\n\n result = set()\n for index in indexes:\n result.add(text[index:index+k])\n\n return list(result)\n\nmapDNA = {\n \"A\": \"T\",\n \"G\": \"C\",\n \"T\": \"A\",\n \"C\": \"G\"\n}\ndef complimentDNA(text):\n result = \"\"\n for letter in text:\n result = result + mapDNA[letter]\n return result[::-1]\n\ndef patternFind(text, pattern):\n index = []\n for i in range(0,len(text)-len(pattern)):\n word = text[i:i+len(pattern)]\n if word == pattern:\n index.append(i)\n return index\n\nfor word in 
patternFind(\"AATGGTCGAAGCGCCACTGCGCCACGACTAAACGCGCCACTAAATCTCCATCAGAGCGCCACTGCGCCACGTGGCGCCACTAGCGCCACCCGTTGGCGCCACTGGGCGCCACGCGCCACGCGCGCCACGCGCCACTGCGCCACCTGAGTAGCGCCACATAGACCGGCGCCACTAAGCGCCACTGCGCCACAGAGGGCGCCACTAACTGCGCCACGCGCCACGTGCGCGCCACGGGCGAACGTTGCGCCACCTTGCCCCGCGCCACGCGCCACGCGCCACGCGCCACCGGGGGCATTGCGCCACGCGCCACGCGCCACCCCGCGCCACGTGTGCCGCGCCACAGCGCCACTTGGCGCGCCACACGCGCCACGCGCCACCTAGCGCCACATAGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACATCTGCGCCACCAGCGCCACGAGCGCCACCATACTGGCGCCACAGCGCGCCACGGCGCCACGCGCCACCCATGCGCCACATAGCCTGTGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACCGCGCCACTGAGCGCCACCCCTAGGCGCCACGCGCCACGCGCCACTGCGCCACGCGCCACGAGGGGGTGCGCCACACGGCCCCAGCGCGAGAGTGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACGCGGCGCCACGCGCCACATCGGGCGCCACTTAAGCCTTGGATTTGCGCCACGGCGGGCGCCACGCCAGCGCCACGTCACAGCGGGCGCCACACGCGCCACTGCGCCACGCGCCACAGCGCGCCACGGCGCCACGAAACGCGCCACCCGGCGCCACGCGCCACTACAAGCGCCACAAGGCGCGCCACTGGCGCCACTGCGCCACACAGGCGCCACGCGCCACGCGCCACATGCGCCACAGGCGCCACGGGGCGCGCCACTTCGCGCCACGCGCCACGGCGCCACGCGCCACAGCGCCACCCTCGCGCCACCGCGCCACGCGCCACGCGCCACGCGCCACGGGCGCCACTCGCGCCACTTCGCGCCACGCGCCACTTCGGGCGCCACGCGCCACGAAATGCGCCACCTGAGGCGCCACTGCGCCACGCGCCACCAGCGCCACAGCGCCACGGGCGCGCCACGCGCCACTCCGCGCCACCGCGCGCCACGCGCCACGAAGCGCCACGCGCCACTGCGCCACACCTGGATAAGCGCCACTGCGCCACACGCTGCGCCACGGCGCCACTGGGGCGCCACGCGCCACAGCGCCACGCGCCACGCTCGCGCCACGCGCCACGTGCGCCACTCCGCGCCACATTTGCGCCACGCGCCACAAGCGCCACGGTGCGCCACTACATGTGTGGTGCGCCACACGAGCGCCACGTCTGTTCACGCGCCACCCTATAGGCGCCACTCAGCGCCACAGCGCCACCTGTAGTGCGCCACGCGCCACCAGGCGCCACCCAGGCGTGCCGTGAGATAGCGCCACGGCGCCACAGCGCCACGCGCCACTCGCGCCACGTGCGCCACCGCGCCACTTTCGCGCCACCCAGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACATGCGCCACCCGCGCCACGTGCGCCACTAGATCGCGCCACTCCGGCGCCACGCGCCACTACGCGCCACACGCGCCACAGCGCCACTCTGAAATTCGCGCCACGCGCCACGCGCCACGAGCGCCACTCGCGCCACAAGCGCCACGCGCCACACCCGCGCCACGCGCCACGCGCCACGCTCTGTTGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACGGCTTGGTTGCGCCACAGCGCCACGGCGCCACGCGCCACTCACCGAAGTGATGGCGCCACACGAGGAGGCGCCACAGCGCCACTGCGCCACTGCGCCACTGCGCCACGCGCCACTTGGCGCGCCACTGGCGCCACGCGCCACAGCGCCACTACGCGCCACCGCGCCACGCGCCA
CTGCGCCACCGCGCCACGCGCCACGGCGCCACTGATGCGCCACCGCGCCACTGTCGTGCGCCACGTCTGGGCTACGGCGCCACCGCGCCACATGGGGCGCCACGACGCGCCACCGGCGCCACACGCGCCACGTGCGCCACCGGCGCCACCGCGCCACTGAGCGCCACACTGCTTGCGCCACATAGAGGCGCCACGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAATTCTGCCATCTGCGGTCGCGCCACGGTATTTTGCGCCACGCGCCACCCTAGGCGCCACTAGCGCCACAAGCGCCACTCGGCGCCACCCTCAGGCGCCACAGCGCCACTACAGCGCCACTGCGCGCCACGCGCCACGCGCCACATTGCCTCAGCGCCACGGCGCCACGCGCTGTAGCGCCACGCGCCACGTTGCGCCACATGCGCCACACATGCGCCACCCTTGCGCCACGCGCCACCAAAGCGCCACGGCGCCACGCGCCACCTGACGGTGGCGCCACCCCTGCTCTTTGCGCCACGGCGCCACGCGCCACTAGCGCCACGCGCCACGCGCCACTGCGCCACAATGTGCGCCACCCCTCTAGGCGCCACTGCGCCACGGCGCCACGGCGCCACGTAGCGCCACGCGCCACGCGCCACCAGTCAATCGGCGCCACCATTCGGCCCGACGCGCCACGCGCCACTGCGCCACGGGCGCCACCATAACGGCGCCACGCGCCACGCGCCACTTGGTTTGATAGCCTGCGCCACAGCGCCACGGCGGCGCCACGTGTAGCGCCACGCGCCACCTTATGCGCCACGCGCCACGCGCCACGAGCGCCACGCGCCACCTTAATTAGCGCCACATAGCGCCACTGCGCCACGCGCCACCAAGATACAAGCGCCACTGGGCGCCACGCGCCACGCGCCACGCGCCACTGCTGCAGGCAGCGCCACCACAGGCGCCACTAGCGCCACTTGAGGCGCCACCGCGCCACTAGAGCGCCACGCGCCACTGGCGCCACTCCCGTCGGCGCCACCGCGCCACAGAGCGCCACCTGCGCCACGCGCCACTCGCGCCACCGTCACGCGCCACAGGCGCGCCACCCCGGCGCCACTGCGCCACTCTAGCGCCACTGCGCGCCACATATGGCGCCACCTACAACAGCGCCACACGCGCCACGCTATTCGCGCCACGCGCCACCGAGCGCCACGCGCCACATATACTCCTATTTGCGCCACGCGCCACCGTGGCGCCACGCGCCACCAGCGCCACTTGGTGCGCCACCCAAGCGCCACCTGCGCCACAGCGCCACTCGCGCCACGATAGATTTGCAAGCGCCACGTCCCGCGCCACAAGCGCCACTCTGCGCCACCGCGCCACGCGCCACTCTATGGCGCCACCGGCGCCACGCGCCACCCAGGCGCCACTCGCGCCACAGCGCCACGCGCGCCACTTCTTAGGCGCCACGCGCCACCAGGCGCCACTCTGCGCGCCACGTAGCGCCACTCGTGCGCCACTTTCGGCATGCGCGCCACATAGGCGCCACGGCGCCACCCCTGGCGCCACAGCGCCACGCGCCACTAGGGCGCCACTCGGCTCGAGCGCCACGCGCCACCGCGCCACTCGCGCCACGCGCCACGCGCCACATCTGCGCCACGCGCCACGCGCCACAGCGCCACCGCGCCACTTGGGCTATGGCGCCACGAAGCGCCACGGCGCCACGTGCGCCACCTAACGCGCCACAGCGCCACGCGCCACATGTCGCCCAGCGCCACCGCGCCACGGCGCCACCGCGCCACCCACGCCGCGCCACTGCGCCACAGCGCCACGGCGCCACGCGCCACATGCGCCACGCAGCGCCACCCCTCCGCCGCGCCACCGGTGCGCCACGCGCCACGCGCCACTTGGCGCCACGCTTCTGCGCCACGGCGCCACCAGCGCCACTAAGCGCCACAGCGCCACCAGCGCCACAAGGGCGCCACTAATGCGCCACAAGCGCCACGCGCC
ACGCGCCACGCGCCACCGCGCCACGCGCCACACCGTCCGCGCCACCTGCGCCACAGGCGCCACGTTTTCTTACTGTATAATAGCGCCACTTCGCGCCACGCGCCACGGGCGCCACGAATGTGTGCGCCACGTCGTGCGCCACGCGCCACGCGCCACTGGGCGCCACGCGCCACCCCCCCGCGCCACTAGCGCCACGCGCCACGCGCCACTGAATTGTAGCGCCACGCGCCACGCGCGCCACTCTATAGGCAAATTGGGCGCCACGCGCCACTATGGTAGTCGTGCGCCACTGCGCCACGCGCCACGCTGCGCCACGCGCCACTGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACGCGCCACCACGCGCCACCTCGCGCGCCACGCGCCACCTAGTAGCGCCACAGCGCCACGAGCGCCACCAGCGCCACGGAAGGCGCGCCACAGGCGCCACTTAGCGCCACCGGCGCCACGCGCCACAGCGCCACTGTGCGCCACGCGCCACGCGCCACTGCGCCACGCCAAATGCGCGCCACCCGCGCCACAAGCGCCACATTGGGGGTCTTAGCGCCACATCGCGCCACGCGCCACCAAATTGCGCGCCACACAGGTCGCGCCACTTCGTCCAGGCGCCACTTCAGCGCCACGGCGCCACGCGCCACGCGCCACTCCGAGCGCCACTGTGCGCCACAGACAGCTCCGCGCCACGGGAGGGGCGCCACATTTTAGCGCCACGCGCCACGCGTTTTGAGCGCCACTGCGCCACCGGAAAGCGCCACTGCTGAGGCGCCACCACAGCGCCACCGCGCCACGCACTGGCGCCACGATATGCGCCACCGCGCCACCGCGCCACAAGGGCGCCACGCGCCACCTAGACGCGCCACGAAAGCGCCACCACTATGCGCCACTCGGCGCCACGCGCCACCACAGGAAAGTATGCGCCACAAAACTGCGCCACGCGCCACGCGCCACGCGCCACGGATTGCGCCACGCGCCACGCGCCACCACGCGCCACGTCCCTGGCGCGCCACGCGCCACGCGCCACGGCGCCACAATGCGCCACATGCGCCACAGGCGCCACCGCGCCACTGCGCCACGCGCCACTGCGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACTAGGCGCCACACTGCGCGCCACGCTACGGAGGACCGATAGCGCCACAGCGCCACGATAGCGCCACGCGCCACGAGTTCTGCGCCACAATGGCCTAGTCGCGCCACCGCGCCACTTACGCGCCACTGCGCCACGCGCCACTTGCGCCACGGCGCCACGCGCCACTGGCGCCACGCGCCACCTCCCGCTGCGCCACGCGCCACTATCCGTGCGCCACGCGCCACTGGCGCCACGCGGCGCCACTGCGAAGTAATCGCGCCACAGCGCCACGCTAAGCGCCACATGCGCGGTGTGCGCCACGCGCCACGCGCCACTCAAGCGCCACAAGCGCCACGAGCGCCACGCGCCACGCGCCACTAAGCGCCACGGCGCCACTTTAATATGCGCCACAAAGCGCCACTGCGCCACCTGCTGCGCCACATCACGCGCCACGAGCGCCACAGTTGCGCGCCACATAATGGGCGCCACGGCCCGTTGAGCGCCACGCGCCACGCGCCACAGTGCCACCCGCGCCACACGCGCCACAGTCGCGCCACGGCGCCACCGGGTTATTATGCGCCACGTGCGGTCGCGCCACTGCGCCACGCGCGCCACAAGCGCCACTCGGATGGCGCCACTTCTGTGGCGCCACGCGCCACACTGCGCCACAGCGCCACGCGCCACACGCGCCACCGCGCCACCGAGTATTAGCGCCACTGCGCCACAAGATGGCGCCACGCGCCACCAGGCGCCACTCGCGCCACGGCGCCACGCGCCACGCGCCACGCGCCACCGCGCCACCTGCGCCACAAGGGGCGCGCCACGCGCCACGCGCCACAGGACCATCGGCGCCACGTCATGGCGCCACCTG
GGTCCATGCGCGCCACCTCCTGCGCCACAGCGCCACCGTCTAAAGCGCCACGAGCGCCACGGCGCCACGCGCCACAGCGCCACGCGCCACCTTCACCGAGTCCGCAGGCGCCACCGTCATAATTGAGTGCGCCACCCCATGCCGCGCCACATAGCGCCACGCTGGCGCCACGGCGCCACGCGCCACGCGCGCCACAGCGCCACGCGCCACACGCGCGCCACAGCGCCACGCGCCACTGTATCGCGCCACGAGCGCCACGTGTGCCACCTACGCTATGCGCCACAAGCGCCACGAGCGCCACAAAGCGCCACTGCGCCACTGGCGCCACTGTGGCAGCGCCACTCGCGCCACTGAAGCGCCACAGCGCCACGGCGCCACGCGCCACCCGGATGCGCCACAGCGCCACATGCGCCACTGCGCCACCATGCTGCGCCACAGCGCCACAGTCGCGCCACTGCGCCACGCGCCACCTCGCGCCACTTGCGCCACGCGCCACGCTGGCAGCGCCACTGCGCCACACAAGATTTCTGAGCGCCACCGACCCCTATAAGCGCCACTGAAGGTGCGCCACGCGCCACTCGCGCCACCTAAGGCGCCACTCTGCGCCACGTGCGCCACCCCTGCGCCACTGCGCCACCTGCGCCACTGAGCGCGCCACGGGCGCCACGCGCCACCCTGCGCCACTGCGCCACTGCGCCACGCTGCGCCACCTGAACTTGGCGCCACGTAGCGCCACGCGCCACGCGCCACCGCGCCACGCGCCACTCGCGCCACTCGTGCGCCACAAGAGCGCCACTGGTTAAGCGATTTGCGCCACGCGCCACACTTGCGCCACGCGCCACAAGTCGGGGCGCCACCTCGCCCCTCGCGCCACTAGCGCCACGCGCCACCACCTGAAAGCGCCACACTACTAGGCGCCACAGGCGCCACGCGCCACGCGCCACCGTCTGCGCCACGCGCGCCACCAGCGCCACAGGGTGCGCCACCGCGCCACGCGCCACCGCGCCACGCGCCACTCCCGGTCAGGGGCGCCACAAAAGGCGCCACAGGGAGGTTCTATCCGCGCCACACACGCGCCACGACGAACATGCGCGCCACTGCGCCACCGCGCCACTAAGCGCCACGGGCGCCACGCGCCACCTAGTCGCGCCACAAAGTGATCGCGGCGCCACCTGCGCCACCGGCGCCACCCAAGCGCCACTTAGGCGCCACGTGCGCCACCCTTGCGCCACCTTGCGCCACCGCGCCACGGCGCCACTCGTGGTGCGCCACTGCCCGCGCCACATAGCGCCACCCGAGCGCCACAGCGTGTTATTTGCGCCACAGCGCCACGCGCCACTGCGCCACGCATGCCGCGCCACAGTGCGCCACGACGCGCCACTGCGCCACGTCGTCTTCGCGCCACTTGACCGACATTTGCCTAGCGCCACCGCGCCACGGCGCCACGGCGCCACCAGCCGATTGCTTTCTTGTTCCACAGGTGCGCCACTGGCGCCACAGAGCGCCACCGGCGCCACGGCGCCACTGCGCCACGCGCCACGCGCCACATGCGCCACTAGCGCCACGCGCCACCCCGCGCCACTGCGCCACATGGCGCCACCAAAGGGTTGCGCCACTTGCGCCACTGCGCCACTCGCGCCACGTTTGCCGCGCCACAAGGCGCCACTTGCGCCACGCGCCACTGCGCCACGTTACCGCCGCGCCACTTAAACCCTGCTTGCGCCACCTGCGCCACGCGCCACGCGCCACAAAACTAGCGCCACTTCAGTACGCGCCACAGCGCCACCTCTCAGCGCGCCACCATCTGGCGCCACACGTGGCGCCACTGCGCCACAGCGCCACTGTGCGCCACCGCGCCACATTTAGCGCCACCAGCGCCACGGCTGCGCCACCCCCCCTTTGCGCCACAAAGCGCCACGCCAGCGCCACTTGGCGCCACAGCGCCACGGGCGGCGCCACGCGCCACGCGCCACGGCGCCACGCGCCACAGAAAAGCGCCA
CAGAGCGCCACAAATCTGGCAGCAGCGCCACAAGTGCGCCACCGCGCCACGAAAGAGGACCCAGCGCCACAGCGCCACATGCGCCACGTGCGCCACGCGCCACTGTGATCGCGCCACGCGCCACGCGCCACCAGAATAGCGCCACAGCGCCACCGCGCCACAGCGCCACAAGGTTCACTGGCGCCACGCGCCACCGCGCCACTCGCGCCACCGGCGCCACCGACCGCGCCACTATGCGCCACCATGCGCCACGCGCCACCTGCGCCACGAGCGCCACAGCGCCACCTTTATGCGCCACGCACCGCGCCACGAGCTTGCGCCACTTTCGCGCCACGCGCCACCTTTATAAAGCGCCACTCGCTGCGCCACGCGCCACCTAATCGCGCCACTGGCGCCACGCGCCACCTGCGCCACGCAAGGCGCCACGGCGCCACAGCGCCACGCGCCACTCGGCGCCACGGAGGCGCCACAGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGCGCCACGTGATAAGCGCCACAGACTCGCGGCGCCACTTCTGCGCCACGCGCCACTCGGGCGCCACAATGCGCCACGGCGCCACACGCGCCACAGGCGCCACCTGTTGCGCCACCGCGCCACGGCTCAATGCGCCACTAGGCGCCACTGCGCCACGCGCCACTTGCGCCACGCGCCACCCATGTATGCGCCACGCGCCACGTGGTGGGCGCCACTTCTCGTGCGCCACAACTCCATAAAACGGCGCCACCGCGCCACACAGCGCCACAGCGCCACGTTCAGCGCCACTTGGCGCCACTGCGCCACCGCGCCACTGTGCGCCACCGCGCCACGCGCCACGTGGCGCCACGCGCCACTGCGCCACTTGCGCCACAGCGCCACGCTACTTAGTCGTGGCGCCACTTGTGTGGCACCATTAGCGAGGGCGCCACGGCGCCACCAAGCGCCACTAAATGGTGGGCGGCGCCACTGCGCCACTTGCGCCACTTAGCGCCACCTAGCGCCACCACCGCGAAAGCGCCACTTCAGCGCCACGAATGCGCCACGCGCCACTTCCTAACTTGCGCCACGGCACGGCGCCACGCTGCGCCACGGCACGAAAGCGCCACTGCGCCACGGCGCCACACCAAGCGCCACGCGCCACGGCGCCACAGCGCCACAGCGCCACACAGCCGGCGCCACAGCGCCACGGCGCCACTCCAGCGCCACCCTAGCGCCACGGCGCCACCTCGTGCGCCACTGCGCCACTAGCGCCACTGCGCCACGTTGCGCCACGCGCCACTGGGCGCCACATAAACAATAGCGCCACGGGCGCCACGCGCCACGGCGAGCGCCACTCGCGCCACGCGCCACGCGCCACTGCGCCACATGCCCCAGCGCCACGCGCCACTGCGCCACGCGCCACAGCGCCAC\",\"GCGCCACGC\"):\n print(word)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# https://www.acmicpc.net/problem/3584
# Nearest Common Ancestor: for each test case, read a rooted tree given as
# (parent, child) edge pairs and print the lowest common ancestor of two
# query nodes.
import sys, collections
# Rebind input to a faster reader; every subsequent int(input()) uses it.
input = sys.stdin.readline
N = int(input())  # number of test cases
for _ in range(N):
    n = int(input())  # number of nodes, labeled 1..n
    # arr: adjacency list, parent -> list of children
    arr = collections.defaultdict(list)
    # parent[i] == i means i has no recorded parent (candidate root);
    # index 0 is unused padding.
    parent = [i for i in range(n + 1)]
    for i in range(n - 1):
        a, b = map(int, input().split())  # edge: a is the parent of b
        arr[a].append(b)
        parent[b] = a
    node_1, node_2 = map(int, input().split())  # the two query nodes
    # Roots = nodes (excluding the 0 padding) that never appeared as a child.
    # For a well-formed tree this list holds exactly one element.
    p = [i for i, e in enumerate(parent) if i > 0 and i == e]
    def bfs(p, goal):
        # Breadth-first search from root p; returns the root-to-goal path
        # as a list of node labels, or None if goal is unreachable.
        queue = collections.deque()
        queue.append([p, [p]])  # each entry: [current node, path so far]
        discoverd = [False] * (n + 1)
        while queue:
            m, r = queue.popleft()
            if not discoverd[m]:
                discoverd[m] = True
                if m == goal:
                    return r
                for i in arr[m]:
                    queue.append([i, r + [i]])
    for i in p:
        # Paths from the root to each query node share a common prefix;
        # the last matching node of that prefix is the LCA.
        a = bfs(i, node_1)
        b = bfs(i, node_2)
        result = 0
        for aa, bb in zip(a,b):
            if aa==bb:
                result = aa
        print(result)
|
normal
|
{
"blob_id": "d60a2d4c819f701e8e439b8839415aa2838df185",
"index": 6415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-4": "import sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-5": "# https://www.acmicpc.net/problem/3584\nimport sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a,b):\n if aa==bb:\n result = aa\n print(result)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def walk(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
print(path)
else:
walk(path)
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cwd = os.getcwd()
def walk(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
print(path)
else:
walk(path)
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
<|reserved_special_token_1|>
import os
cwd = os.getcwd()
def walk(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for name in os.listdir(dirname):
path = os.path.join(dirname, name)
if os.path.isfile(path):
print(path)
else:
walk(path)
def walk2(dirname):
"""Prints the names of all files in
dirname and its subdirectories.
dirname: string name of directory
"""
for root, dirs, files in os.walk(dirname):
for filename in files:
print(os.path.join(root, filename))
<|reserved_special_token_1|>
# #writing a file
# fout = open('Session14/output.txt', 'w')
# line1 = "How many roads must a man walk down\n"
# fout.write(line1)
# line2 = "Before you call him a man?\n"
# fout.write(line2)
# #when you are done writing, you should close the file.
# fout.close()
# #if you dont close the file, it gets closed for you when the program dies
#exercise 1
# def sed(pattern, replace, source, dest):
# with open(source, 'r') as f_r:
# with open(dest, 'w') as f_w:
# for line in f_r:
# new_line = line.replace(pattern, replace)
# f_w.write(new_line)
# pattern = " man "
# replace = " woman "
# source = "Session14/output.txt"
# dest = "Session14/output2.txt"
# sed(pattern, replace, source, dest)
import os
cwd = os.getcwd()
#cwd stands for "current working directory"
# print(cwd)
#os.path provides other functions for working with filenames and paths
# os.path.abspath('output.txt')
# os.path.exists('output.txt')
# os.path.isdir('output.txt')
# os.path.isdir('/exercises')
# os.path.isfile('output.txt')
# os.listdir(cwd)
def walk(dirname):
    """Print the full path of every file under dirname, recursively.

    dirname: string name of directory
    """
    for entry in os.listdir(dirname):
        full_path = os.path.join(dirname, entry)
        if not os.path.isfile(full_path):
            # A directory: descend into it.
            walk(full_path)
        else:
            print(full_path)
#os.path.join takes a directory and a file name and joins them inot a complete path
def walk2(dirname):
    """Print the full path of every file under dirname, recursively.

    Same output as walk, but driven by os.walk instead of explicit
    recursion.

    dirname: string name of directory
    """
    for current_dir, _subdirs, filenames in os.walk(dirname):
        for name in filenames:
            print(os.path.join(current_dir, name))
|
flexible
|
{
"blob_id": "de1262da699a18266ad8673597391f625783a44d",
"index": 5721,
"step-1": "<mask token>\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-2": "<mask token>\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-3": "<mask token>\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-4": "import os\ncwd = os.getcwd()\n\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-5": "# #writing a file\n# fout = open('Session14/output.txt', 'w')\n# line1 = \"How many roads must a man walk down\\n\"\n# fout.write(line1)\n# line2 = \"Before you call him a man?\\n\"\n# fout.write(line2)\n# #when you are done writing, you should close the file.\n# fout.close()\n# #if you dont close the file, it gets closed for you when the program dies\n\n#exercise 1\n# def sed(pattern, replace, source, dest):\n# with open(source, 'r') as f_r:\n# with open(dest, 'w') as f_w:\n# for line in f_r:\n# new_line = line.replace(pattern, replace)\n# f_w.write(new_line)\n\n# pattern = \" man \"\n# replace = \" woman \"\n# source = \"Session14/output.txt\"\n# dest = \"Session14/output2.txt\"\n# sed(pattern, replace, source, dest)\n\nimport os\ncwd = os.getcwd()\n#cwd stands for \"current working directory\"\n# print(cwd)\n\n#os.path provides other functions for working with filenames and paths\n# os.path.abspath('output.txt')\n# os.path.exists('output.txt')\n# os.path.isdir('output.txt')\n# os.path.isdir('/exercises')\n# os.path.isfile('output.txt')\n# os.listdir(cwd)\n\ndef walk(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\" \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)\n#os.path.join takes a directory and a file name and joins them inot a complete path\n\ndef walk2(dirname):\n \"\"\"Prints the names of all files in \n dirname and its subdirectories.\n\n dirname: string name of directory\n \"\"\"\n for root, dirs, files in os.walk(dirname):\n for filename in files:\n print(os.path.join(root, filename))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def merge_sort(merged_arr: list):
"""
функция делит поданный на вход массив,
и рекурсивно все сортирует слиянием
:param merged_arr: - список на входе
:return: - список отсортированный слиянием на выходе
"""
if len(merged_arr) <= 1:
return
middle = len(merged_arr) // 2
left = merged_arr[:middle]
right = merged_arr[middle:]
merge_sort(left)
merge_sort(right)
comb_arr = merge(left, right)
for i in range(len(merged_arr)):
merged_arr[i] = comb_arr[i]
return merged_arr
def merge(merge_1: list, merge_2: list):
"""
Функция собирает из двух предварительно отсортированных массивов,
поданных на вход, один и ео же возвращает
:param merge_1: - первый отсортированный список
:param merge_2: - второй отсортированный список
:return: - "слитый" из двух, отсортированный список
"""
merged_arr = [0] * (len(merge_1) + len(merge_2))
i = k = n = 0
while i < len(merge_1) and k < len(merge_2):
if merge_1[i] <= merge_2[k]:
merged_arr[n] = merge_1[i]
i += 1
n += 1
else:
merged_arr[n] = merge_2[k]
k += 1
n += 1
while i < len(merge_1):
merged_arr[n] = merge_1[i]
i += 1
n += 1
while k < len(merge_2):
merged_arr[n] = merge_2[k]
k += 1
n += 1
return merged_arr
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('*' * 30)
print('Initial array:')
print(array)
print('*' * 30)
def merge_sort(merged_arr: list):
"""
функция делит поданный на вход массив,
и рекурсивно все сортирует слиянием
:param merged_arr: - список на входе
:return: - список отсортированный слиянием на выходе
"""
if len(merged_arr) <= 1:
return
middle = len(merged_arr) // 2
left = merged_arr[:middle]
right = merged_arr[middle:]
merge_sort(left)
merge_sort(right)
comb_arr = merge(left, right)
for i in range(len(merged_arr)):
merged_arr[i] = comb_arr[i]
return merged_arr
def merge(merge_1: list, merge_2: list):
"""
Функция собирает из двух предварительно отсортированных массивов,
поданных на вход, один и ео же возвращает
:param merge_1: - первый отсортированный список
:param merge_2: - второй отсортированный список
:return: - "слитый" из двух, отсортированный список
"""
merged_arr = [0] * (len(merge_1) + len(merge_2))
i = k = n = 0
while i < len(merge_1) and k < len(merge_2):
if merge_1[i] <= merge_2[k]:
merged_arr[n] = merge_1[i]
i += 1
n += 1
else:
merged_arr[n] = merge_2[k]
k += 1
n += 1
while i < len(merge_1):
merged_arr[n] = merge_1[i]
i += 1
n += 1
while k < len(merge_2):
merged_arr[n] = merge_2[k]
k += 1
n += 1
return merged_arr
print('Merge sorted array:')
print(merge_sort(array))
print('*' * 30)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
size = 13
array = [randint(0, 50) for x in range(size)]
print('*' * 30)
print('Initial array:')
print(array)
print('*' * 30)
def merge_sort(merged_arr: list):
"""
функция делит поданный на вход массив,
и рекурсивно все сортирует слиянием
:param merged_arr: - список на входе
:return: - список отсортированный слиянием на выходе
"""
if len(merged_arr) <= 1:
return
middle = len(merged_arr) // 2
left = merged_arr[:middle]
right = merged_arr[middle:]
merge_sort(left)
merge_sort(right)
comb_arr = merge(left, right)
for i in range(len(merged_arr)):
merged_arr[i] = comb_arr[i]
return merged_arr
def merge(merge_1: list, merge_2: list):
"""
Функция собирает из двух предварительно отсортированных массивов,
поданных на вход, один и ео же возвращает
:param merge_1: - первый отсортированный список
:param merge_2: - второй отсортированный список
:return: - "слитый" из двух, отсортированный список
"""
merged_arr = [0] * (len(merge_1) + len(merge_2))
i = k = n = 0
while i < len(merge_1) and k < len(merge_2):
if merge_1[i] <= merge_2[k]:
merged_arr[n] = merge_1[i]
i += 1
n += 1
else:
merged_arr[n] = merge_2[k]
k += 1
n += 1
while i < len(merge_1):
merged_arr[n] = merge_1[i]
i += 1
n += 1
while k < len(merge_2):
merged_arr[n] = merge_2[k]
k += 1
n += 1
return merged_arr
print('Merge sorted array:')
print(merge_sort(array))
print('*' * 30)
<|reserved_special_token_1|>
from random import randint
size = 13
array = [randint(0, 50) for x in range(size)]
print('*' * 30)
print('Initial array:')
print(array)
print('*' * 30)
def merge_sort(merged_arr: list):
"""
функция делит поданный на вход массив,
и рекурсивно все сортирует слиянием
:param merged_arr: - список на входе
:return: - список отсортированный слиянием на выходе
"""
if len(merged_arr) <= 1:
return
middle = len(merged_arr) // 2
left = merged_arr[:middle]
right = merged_arr[middle:]
merge_sort(left)
merge_sort(right)
comb_arr = merge(left, right)
for i in range(len(merged_arr)):
merged_arr[i] = comb_arr[i]
return merged_arr
def merge(merge_1: list, merge_2: list):
"""
Функция собирает из двух предварительно отсортированных массивов,
поданных на вход, один и ео же возвращает
:param merge_1: - первый отсортированный список
:param merge_2: - второй отсортированный список
:return: - "слитый" из двух, отсортированный список
"""
merged_arr = [0] * (len(merge_1) + len(merge_2))
i = k = n = 0
while i < len(merge_1) and k < len(merge_2):
if merge_1[i] <= merge_2[k]:
merged_arr[n] = merge_1[i]
i += 1
n += 1
else:
merged_arr[n] = merge_2[k]
k += 1
n += 1
while i < len(merge_1):
merged_arr[n] = merge_1[i]
i += 1
n += 1
while k < len(merge_2):
merged_arr[n] = merge_2[k]
k += 1
n += 1
return merged_arr
print('Merge sorted array:')
print(merge_sort(array))
print('*' * 30)
<|reserved_special_token_1|>
# 2. Отсортируйте по возрастанию методом слияния одномерный вещественный массив,
# заданный случайными числами на промежутке [0; 50).
# Выведите на экран исходный и отсортированный массивы.
from random import randint
# создаем массив [0, 50) случайных чисел
size = 13
array = [randint(0, 50) for x in range(size)]
print('*' * 30)
print('Initial array:')
print(array)
print('*' * 30)
def merge_sort(merged_arr: list) -> list:
    """
    Sort a list in place using recursive merge sort and return it.

    :param merged_arr: list to sort (mutated in place)
    :return: the same list object, sorted in ascending order
    """
    # Base case: a list of zero or one element is already sorted.
    # Returning the list (instead of a bare ``return``) means the
    # top-level ``print(merge_sort(array))`` never prints None.
    if len(merged_arr) <= 1:
        return merged_arr
    # Split the list into left and right halves.
    middle = len(merged_arr) // 2
    left = merged_arr[:middle]
    right = merged_arr[middle:]
    # Sort each half recursively.
    merge_sort(left)
    merge_sort(right)
    # Merge the sorted halves and copy the result back in place.
    comb_arr = merge(left, right)
    for i in range(len(merged_arr)):
        merged_arr[i] = comb_arr[i]
    return merged_arr


def merge(merge_1: list, merge_2: list) -> list:
    """
    Merge two pre-sorted lists into a single sorted list.

    :param merge_1: first sorted list
    :param merge_2: second sorted list
    :return: new sorted list containing every element of both inputs
    """
    # Pre-size the output buffer with zeros.
    merged_arr = [0] * (len(merge_1) + len(merge_2))
    # i, k index the two inputs; n indexes the output.
    i = k = n = 0
    # Repeatedly take the smaller head element until one input runs out.
    while i < len(merge_1) and k < len(merge_2):
        if merge_1[i] <= merge_2[k]:
            merged_arr[n] = merge_1[i]
            i += 1
            n += 1
        else:
            merged_arr[n] = merge_2[k]
            k += 1
            n += 1
    # Copy whatever remains of either input (at most one loop runs).
    while i < len(merge_1):
        merged_arr[n] = merge_1[i]
        i += 1
        n += 1
    while k < len(merge_2):
        merged_arr[n] = merge_2[k]
        k += 1
        n += 1
    return merged_arr
print('Merge sorted array:')
print(merge_sort(array))
print('*' * 30)
|
flexible
|
{
"blob_id": "cd1987f09ca3e09ac251b1ebdec4168fd5dbdd0e",
"index": 7607,
"step-1": "<mask token>\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n",
"step-3": "<mask token>\nsize = 13\narray = [randint(0, 50) for x in range(size)]\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n",
"step-4": "from random import randint\nsize = 13\narray = [randint(0, 50) for x in range(size)]\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n",
"step-5": "# 2. Отсортируйте по возрастанию методом слияния одномерный вещественный массив,\r\n# заданный случайными числами на промежутке [0; 50).\r\n# Выведите на экран исходный и отсортированный массивы.\r\n\r\nfrom random import randint\r\n\r\n# создаем массив [0, 50) случайных чисел\r\n\r\nsize = 13\r\narray = [randint(0, 50) for x in range(size)]\r\n\r\nprint('*' * 30)\r\nprint('Initial array:')\r\nprint(array)\r\nprint('*' * 30)\r\n\r\n\r\ndef merge_sort(merged_arr: list):\r\n \"\"\"\r\n функция делит поданный на вход массив,\r\n и рекурсивно все сортирует слиянием\r\n :param merged_arr: - список на входе\r\n :return: - список отсортированный слиянием на выходе\r\n \"\"\"\r\n # если массив единичный, то \"приехали\"\r\n if len(merged_arr) <= 1:\r\n return\r\n # разбиваем начальный массив на левую и правую части\r\n middle = len(merged_arr) // 2\r\n left = merged_arr[:middle]\r\n right = merged_arr[middle:]\r\n # рекуррентно их сортируем\r\n merge_sort(left)\r\n merge_sort(right)\r\n # \"сливаем\" левую и правые части\r\n comb_arr = merge(left, right)\r\n for i in range(len(merged_arr)):\r\n merged_arr[i] = comb_arr[i]\r\n return merged_arr\r\n\r\n\r\ndef merge(merge_1: list, merge_2: list):\r\n \"\"\"\r\n Функция собирает из двух предварительно отсортированных массивов,\r\n поданных на вход, один и ео же возвращает\r\n :param merge_1: - первый отсортированный список\r\n :param merge_2: - второй отсортированный список\r\n :return: - \"слитый\" из двух, отсортированный список\r\n \"\"\"\r\n # заполняем дополнительный массив С нулями\r\n merged_arr = [0] * (len(merge_1) + len(merge_2))\r\n # объявляем и обнуляем счетчики\r\n i = k = n = 0\r\n # разбираем в С из А или В меньший элемент, пока какой-то из А или В не закончится\r\n while i < len(merge_1) and k < len(merge_2):\r\n if merge_1[i] <= merge_2[k]:\r\n merged_arr[n] = merge_1[i]\r\n i += 1\r\n n += 1\r\n else:\r\n merged_arr[n] = merge_2[k]\r\n k += 1\r\n n += 1\r\n # докладываем в С остатки из А или В - 
где осталось.\r\n while i < len(merge_1):\r\n merged_arr[n] = merge_1[i]\r\n i += 1\r\n n += 1\r\n while k < len(merge_2):\r\n merged_arr[n] = merge_2[k]\r\n k += 1\r\n n += 1\r\n return merged_arr\r\n\r\n\r\nprint('Merge sorted array:')\r\nprint(merge_sort(array))\r\nprint('*' * 30)\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import cv2 as cv
import numpy as np
import sys
from meio_tom_lib import *
imgname = sys.argv[1]
imgpath = "img/" + imgname
try:
img = cv.imread(imgpath)
newimg1 = jarvis_judice_ninke_1(img)*255
newimg2 = jarvis_judice_ninke_2(img)*255
cv.imshow("Imagem original",img)
cv.imshow("Jarvis, Judice e Ninke metodo 1",newimg1)
cv.imshow("Jarvis, Judice e Ninke metodo 2",newimg2)
print("")
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname,newimg1)
cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname,newimg2)
print("Resultados salvos em:")
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname)
print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname)
cv.waitKey(0)
cv.destroyAllWindows()
except:
print("Erro")
|
normal
|
{
"blob_id": "bf764457e6af25d2d9406b18af51f63b36ab823a",
"index": 8564,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-3": "<mask token>\nimgname = sys.argv[1]\nimgpath = 'img/' + imgname\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-4": "import cv2 as cv\nimport numpy as np\nimport sys\nfrom meio_tom_lib import *\nimgname = sys.argv[1]\nimgpath = 'img/' + imgname\ntry:\n img = cv.imread(imgpath)\n newimg1 = jarvis_judice_ninke_1(img) * 255\n newimg2 = jarvis_judice_ninke_2(img) * 255\n cv.imshow('Imagem original', img)\n cv.imshow('Jarvis, Judice e Ninke metodo 1', newimg1)\n cv.imshow('Jarvis, Judice e Ninke metodo 2', newimg2)\n print('')\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' +\n imgname, newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' +\n imgname, newimg2)\n print('Resultados salvos em:')\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-' + imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-' + imgname)\n cv.waitKey(0)\n cv.destroyAllWindows()\nexcept:\n print('Erro')\n",
"step-5": "import cv2 as cv\nimport numpy as np\nimport sys\nfrom meio_tom_lib import *\n\nimgname = sys.argv[1]\nimgpath = \"img/\" + imgname\n\n\ntry:\n img = cv.imread(imgpath)\n\n newimg1 = jarvis_judice_ninke_1(img)*255\n newimg2 = jarvis_judice_ninke_2(img)*255\n\n cv.imshow(\"Imagem original\",img)\n cv.imshow(\"Jarvis, Judice e Ninke metodo 1\",newimg1)\n cv.imshow(\"Jarvis, Judice e Ninke metodo 2\",newimg2)\n\n print(\"\")\n\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname,newimg1)\n cv.imwrite('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname,newimg2)\n\n print(\"Resultados salvos em:\")\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_1-'+imgname)\n print('resultados/jarvis_judice_ninke/jarvis_judice_ninke_2-'+imgname)\n\n cv.waitKey(0)\n cv.destroyAllWindows()\n \nexcept:\n print(\"Erro\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Given an unsorted integer array nums, find the smallest missing positive integer.
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
# if nums is emtpy, first pos int is 1
if not nums:
return 1
maxnum = max(nums) # for speed we assign max of nums to var maxnum
# if maxnum is neg in or 0, first pos int is 1
if maxnum < 1:
return 1
# else, for all in from 1 to maxnum + 2, return the first missing int
else:
for i in range(1, (maxnum+2)):
if i not in nums:
return i
|
normal
|
{
"blob_id": "09905d4b5ad2e59578d874db171aafb6c42db105",
"index": 8609,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def firstMissingPositive(self, nums: List[int]) ->int:\n if not nums:\n return 1\n maxnum = max(nums)\n if maxnum < 1:\n return 1\n else:\n for i in range(1, maxnum + 2):\n if i not in nums:\n return i\n",
"step-4": "# Given an unsorted integer array nums, find the smallest missing positive integer.\nclass Solution:\n def firstMissingPositive(self, nums: List[int]) -> int:\n # if nums is emtpy, first pos int is 1\n if not nums:\n return 1\n maxnum = max(nums) # for speed we assign max of nums to var maxnum\n # if maxnum is neg in or 0, first pos int is 1\n if maxnum < 1:\n return 1 \n # else, for all in from 1 to maxnum + 2, return the first missing int\n else:\n for i in range(1, (maxnum+2)):\n if i not in nums:\n return i\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .chair_model import run_chair_simulation, init_omega_t, JumpingModel, H_to_L
from .utils import load_hcp_peaks, Condition, average_peak_counts
<|reserved_special_token_1|>
from .chair_model import run_chair_simulation, init_omega_t, \
JumpingModel, H_to_L
from .utils import load_hcp_peaks, Condition, average_peak_counts
|
flexible
|
{
"blob_id": "9087a7bf42070fdb8639c616fdf7f09ad3903656",
"index": 6755,
"step-1": "<mask token>\n",
"step-2": "from .chair_model import run_chair_simulation, init_omega_t, JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-3": "from .chair_model import run_chair_simulation, init_omega_t, \\\n JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from source.ga.population import create_population, random_genome
def test_create_population(race_example):
population = create_population(race_example, 20)
assert population
def test_random_genome(race_basic):
genome = random_genome(race_basic)
assert genome
def test_random_genome_example(race_example):
genome = random_genome(race_example)
assert genome
|
normal
|
{
"blob_id": "0802aac57cd28104cdb6ff45d993aa224f80b830",
"index": 2877,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_random_genome(race_basic):\n genome = random_genome(race_basic)\n assert genome\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_create_population(race_example):\n population = create_population(race_example, 20)\n assert population\n\n\ndef test_random_genome(race_basic):\n genome = random_genome(race_basic)\n assert genome\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef test_create_population(race_example):\n population = create_population(race_example, 20)\n assert population\n\n\ndef test_random_genome(race_basic):\n genome = random_genome(race_basic)\n assert genome\n\n\ndef test_random_genome_example(race_example):\n genome = random_genome(race_example)\n assert genome\n",
"step-5": "from source.ga.population import create_population, random_genome\n\n\ndef test_create_population(race_example):\n population = create_population(race_example, 20)\n assert population\n\n\ndef test_random_genome(race_basic):\n genome = random_genome(race_basic)\n assert genome\n\n\ndef test_random_genome_example(race_example):\n genome = random_genome(race_example)\n assert genome\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .signals import get_restaurant_coordinates, count_average_price, count_total_calories
from .dish import Dish
from .ingredients import Ingredient
from .restaurants import Restaurant
|
flexible
|
{
"blob_id": "1935cab249bf559aeadf785ce7abcecb03344c04",
"index": 6058,
"step-1": "<mask token>\n",
"step-2": "from .signals import get_restaurant_coordinates, count_average_price, count_total_calories\nfrom .dish import Dish\nfrom .ingredients import Ingredient\nfrom .restaurants import Restaurant\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Convertidor de pies y pulgadas a centímetros')
<|reserved_special_token_0|>
print('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))
<|reserved_special_token_1|>
print('Convertidor de pies y pulgadas a centímetros')
pies = float(input('Escriba una cantidad de pies: '))
pulgadas = float(input('Escriba una cantidad de pulgadas: '))
cm = (pies * 12 + pulgadas) * 2.54
print('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))
<|reserved_special_token_1|>
print("Convertidor de pies y pulgadas a centímetros")
pies = float(input("Escriba una cantidad de pies: "))
pulgadas = float(input("Escriba una cantidad de pulgadas: "))
cm = (pies * 12 + pulgadas) * 2.54;
print("{} pies y {} pulgadas son {} cm".format(pies, pulgadas, cm))
|
flexible
|
{
"blob_id": "b0ab97f5c05cdeee4c01460109a76cef75ac72ce",
"index": 5342,
"step-1": "<mask token>\n",
"step-2": "print('Convertidor de pies y pulgadas a centímetros')\n<mask token>\nprint('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))\n",
"step-3": "print('Convertidor de pies y pulgadas a centímetros')\npies = float(input('Escriba una cantidad de pies: '))\npulgadas = float(input('Escriba una cantidad de pulgadas: '))\ncm = (pies * 12 + pulgadas) * 2.54\nprint('{} pies y {} pulgadas son {} cm'.format(pies, pulgadas, cm))\n",
"step-4": "print(\"Convertidor de pies y pulgadas a centímetros\")\npies = float(input(\"Escriba una cantidad de pies: \"))\npulgadas = float(input(\"Escriba una cantidad de pulgadas: \"))\ncm = (pies * 12 + pulgadas) * 2.54;\nprint(\"{} pies y {} pulgadas son {} cm\".format(pies, pulgadas, cm))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
url = raw_input('Enter URL: ')
if len(url) < 1:
break
print('Retrieving', url)
connection = urllib.urlopen(url)
data = connection.read()
print('Retrieved', len(data), 'characters')
try:
js = json.loads(str(data))
except:
js = None
print(json.dumps(js, indent=4))
comments = js['comments']
result = 0
for comment in comments:
result += comment['count']
print('\n')
print('Result = {}'.format(result))
<|reserved_special_token_1|>
import json
import urllib
while True:
url = raw_input('Enter URL: ')
if len(url) < 1:
break
print('Retrieving', url)
connection = urllib.urlopen(url)
data = connection.read()
print('Retrieved', len(data), 'characters')
try:
js = json.loads(str(data))
except:
js = None
print(json.dumps(js, indent=4))
comments = js['comments']
result = 0
for comment in comments:
result += comment['count']
print('\n')
print('Result = {}'.format(result))
<|reserved_special_token_1|>
import json
import urllib
while True:
# Get input URL
url = raw_input("Enter URL: ")
# Check valid input
if len(url) < 1:
break
# Get data
print("Retrieving", url)
connection = urllib.urlopen(url)
data = connection.read()
print("Retrieved", len(data), "characters")
# Parse and deserialize
try:
js = json.loads(str(data))
except:
js = None
print(json.dumps(js, indent=4))
comments = js["comments"]
result = 0
for comment in comments:
result += comment["count"]
print("\n")
print("Result = {}".format(result))
|
flexible
|
{
"blob_id": "4cdd5fc15096aac01ad6d97d38ef7397859de18b",
"index": 5470,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n url = raw_input('Enter URL: ')\n if len(url) < 1:\n break\n print('Retrieving', url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print('Retrieved', len(data), 'characters')\n try:\n js = json.loads(str(data))\n except:\n js = None\n print(json.dumps(js, indent=4))\n comments = js['comments']\n result = 0\n for comment in comments:\n result += comment['count']\n print('\\n')\n print('Result = {}'.format(result))\n",
"step-3": "import json\nimport urllib\nwhile True:\n url = raw_input('Enter URL: ')\n if len(url) < 1:\n break\n print('Retrieving', url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print('Retrieved', len(data), 'characters')\n try:\n js = json.loads(str(data))\n except:\n js = None\n print(json.dumps(js, indent=4))\n comments = js['comments']\n result = 0\n for comment in comments:\n result += comment['count']\n print('\\n')\n print('Result = {}'.format(result))\n",
"step-4": "import json\nimport urllib\n\nwhile True:\n # Get input URL\n url = raw_input(\"Enter URL: \")\n # Check valid input\n if len(url) < 1:\n break\n\n # Get data\n print(\"Retrieving\", url)\n connection = urllib.urlopen(url)\n data = connection.read()\n print(\"Retrieved\", len(data), \"characters\")\n\n # Parse and deserialize\n try:\n js = json.loads(str(data))\n except:\n js = None\n \n print(json.dumps(js, indent=4))\n\n comments = js[\"comments\"]\n\n result = 0\n\n for comment in comments:\n result += comment[\"count\"]\n\n print(\"\\n\")\n print(\"Result = {}\".format(result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pysftp
import time
import threading
def sftp_connection():
while True:
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
try:
with pysftp.Connection('sb-emea.avl.com', username='abhishek.hingwasia@avl.com', password='AvlAvl2931!!',
cnopts=cnopts) as sftp:
print('connection has been established')
remotepath = '/Cummins_CTCI_NB/sftp_image_test/'
while True:
remotepath = '/Cummins_CTCI_NB/sftp_image_test/'
try:
if sftp.exists(remotepath):
print('hi')
time.sleep(5)
print('hello')
time.sleep(5)
except:
print('connection/ssherror exception')
break
except:
print('connection has been breaked')
time.sleep(5)
if __name__ == "__main__":
t1 = threading.Thread(target=sftp_connection)
t1.start()
|
normal
|
{
"blob_id": "676ccbac9385a4b63d599c3f85f16e28d839e9b8",
"index": 3731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sftp_connection():\n while True:\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n try:\n with pysftp.Connection('sb-emea.avl.com', username=\n 'abhishek.hingwasia@avl.com', password='AvlAvl2931!!',\n cnopts=cnopts) as sftp:\n print('connection has been established')\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n while True:\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n try:\n if sftp.exists(remotepath):\n print('hi')\n time.sleep(5)\n print('hello')\n time.sleep(5)\n except:\n print('connection/ssherror exception')\n break\n except:\n print('connection has been breaked')\n time.sleep(5)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sftp_connection():\n while True:\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n try:\n with pysftp.Connection('sb-emea.avl.com', username=\n 'abhishek.hingwasia@avl.com', password='AvlAvl2931!!',\n cnopts=cnopts) as sftp:\n print('connection has been established')\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n while True:\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n try:\n if sftp.exists(remotepath):\n print('hi')\n time.sleep(5)\n print('hello')\n time.sleep(5)\n except:\n print('connection/ssherror exception')\n break\n except:\n print('connection has been breaked')\n time.sleep(5)\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=sftp_connection)\nt1.start()\n",
"step-4": "import pysftp\nimport time\nimport threading\n\n\ndef sftp_connection():\n while True:\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n try:\n with pysftp.Connection('sb-emea.avl.com', username=\n 'abhishek.hingwasia@avl.com', password='AvlAvl2931!!',\n cnopts=cnopts) as sftp:\n print('connection has been established')\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n while True:\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n try:\n if sftp.exists(remotepath):\n print('hi')\n time.sleep(5)\n print('hello')\n time.sleep(5)\n except:\n print('connection/ssherror exception')\n break\n except:\n print('connection has been breaked')\n time.sleep(5)\n\n\nif __name__ == '__main__':\n t1 = threading.Thread(target=sftp_connection)\nt1.start()\n",
"step-5": "import pysftp\nimport time\nimport threading\n\ndef sftp_connection():\n while True:\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n try:\n with pysftp.Connection('sb-emea.avl.com', username='abhishek.hingwasia@avl.com', password='AvlAvl2931!!',\n cnopts=cnopts) as sftp:\n print('connection has been established')\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n\n while True:\n remotepath = '/Cummins_CTCI_NB/sftp_image_test/'\n try:\n\n if sftp.exists(remotepath):\n\n\n print('hi')\n time.sleep(5)\n print('hello')\n time.sleep(5)\n except:\n print('connection/ssherror exception')\n break\n except:\n print('connection has been breaked')\n time.sleep(5)\n\nif __name__ == \"__main__\":\n t1 = threading.Thread(target=sftp_connection)\nt1.start()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python2.7
'''USAGE: completeness.py BLAST_output (tab formatted)
Prints % completeness based on marker gene BLAST of caled genes from a genome
Markers from Lan et al. (2016)
'''
import sys
with open(sys.argv[1],'r') as blastOut:
geneHits = []
orgHits = []
hits = 0.0
for line in blastOut:
hits += 1.0
currHit = line.split()[1]
currGene = currHit.split('_')[-1]
currOrg = currHit.split('_')[0]
geneHits.append(currGene)
orgHits.append(currOrg)
uniqueGenes = list(set(geneHits))
multiHits = []
for index in uniqueGenes:
if geneHits.count(index) >= 2:
multiHits.append(geneHits.count(index))
contamination = (float(sum(multiHits)) / hits) * float(len(multiHits))
contamination = round((contamination * 100.0), 2)
uniqueGenes = float(len(uniqueGenes))
completeness = round(((uniqueGenes / 73.0) * 100.0), 2)
uniqueOrgs = list(set(orgHits))
topCount = 0
hitCounts = []
topOrg = 'org'
for index in uniqueOrgs:
if orgHits.count(index) > topCount:
topCount = orgHits.count(index)
hitCounts.append(topCount)
topOrg = index
otherCount = float(hits - topCount)
uniqueOrgs = float(len(uniqueOrgs))
heterogeneity = (otherCount / float(hits)) * uniqueOrgs
heterogeneity = round((heterogeneity * 100.0), 2)
print('\nGenome bin: ' + str(sys.argv[1]))
print('Completeness: ' + str(completeness) + '%')
print('Contamination: ' + str(contamination) + '%')
print('Heterogeneity: ' + str(heterogeneity) + '%\n')
|
normal
|
{
"blob_id": "a8659ca7d7a5870fc6f62b3dfee1779e33373e7b",
"index": 8388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\n<mask token>\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\n<mask token>\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\n<mask token>\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-3": "<mask token>\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\ncontamination = float(sum(multiHits)) / hits * float(len(multiHits))\ncontamination = round(contamination * 100.0, 2)\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(uniqueGenes / 73.0 * 100.0, 2)\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = otherCount / float(hits) * uniqueOrgs\nheterogeneity = round(heterogeneity * 100.0, 2)\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-4": "<mask token>\nimport sys\nwith open(sys.argv[1], 'r') as blastOut:\n geneHits = []\n orgHits = []\n hits = 0.0\n for line in blastOut:\n hits += 1.0\n currHit = line.split()[1]\n currGene = currHit.split('_')[-1]\n currOrg = currHit.split('_')[0]\n geneHits.append(currGene)\n orgHits.append(currOrg)\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n if geneHits.count(index) >= 2:\n multiHits.append(geneHits.count(index))\ncontamination = float(sum(multiHits)) / hits * float(len(multiHits))\ncontamination = round(contamination * 100.0, 2)\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(uniqueGenes / 73.0 * 100.0, 2)\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n if orgHits.count(index) > topCount:\n topCount = orgHits.count(index)\n hitCounts.append(topCount)\n topOrg = index\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = otherCount / float(hits) * uniqueOrgs\nheterogeneity = round(heterogeneity * 100.0, 2)\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n",
"step-5": "#!/usr/bin/python2.7\n'''USAGE: completeness.py BLAST_output (tab formatted)\nPrints % completeness based on marker gene BLAST of caled genes from a genome\nMarkers from Lan et al. (2016)\n'''\nimport sys\n\nwith open(sys.argv[1],'r') as blastOut:\n\n\tgeneHits = []\n\torgHits = []\n\thits = 0.0\n\tfor line in blastOut:\n\t\thits += 1.0\n\t\tcurrHit = line.split()[1]\n\t\tcurrGene = currHit.split('_')[-1]\n\t\tcurrOrg = currHit.split('_')[0]\n\t\tgeneHits.append(currGene)\n\t\torgHits.append(currOrg)\n\n\nuniqueGenes = list(set(geneHits))\nmultiHits = []\nfor index in uniqueGenes:\n\tif geneHits.count(index) >= 2:\n\t\tmultiHits.append(geneHits.count(index))\ncontamination = (float(sum(multiHits)) / hits) * float(len(multiHits))\ncontamination = round((contamination * 100.0), 2)\n\nuniqueGenes = float(len(uniqueGenes))\ncompleteness = round(((uniqueGenes / 73.0) * 100.0), 2)\n\nuniqueOrgs = list(set(orgHits))\ntopCount = 0\nhitCounts = []\ntopOrg = 'org'\nfor index in uniqueOrgs:\n\tif orgHits.count(index) > topCount:\n\t\ttopCount = orgHits.count(index)\n\t\thitCounts.append(topCount)\n\t\ttopOrg = index\n\notherCount = float(hits - topCount)\nuniqueOrgs = float(len(uniqueOrgs))\nheterogeneity = (otherCount / float(hits)) * uniqueOrgs\nheterogeneity = round((heterogeneity * 100.0), 2)\n\n\nprint('\\nGenome bin: ' + str(sys.argv[1]))\nprint('Completeness: ' + str(completeness) + '%')\nprint('Contamination: ' + str(contamination) + '%')\nprint('Heterogeneity: ' + str(heterogeneity) + '%\\n')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(100, 1000):
for j in range(100, 1000):
s = str(i * j)
if s[::-1] == s:
pal.append(int(s))
print(max(pal))
<|reserved_special_token_1|>
pal = []
for i in range(100, 1000):
for j in range(100, 1000):
s = str(i * j)
if s[::-1] == s:
pal.append(int(s))
print(max(pal))
|
flexible
|
{
"blob_id": "179a9cf0713001e361f39aa30192618b392c78c7",
"index": 6972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(100, 1000):\n for j in range(100, 1000):\n s = str(i * j)\n if s[::-1] == s:\n pal.append(int(s))\nprint(max(pal))\n",
"step-3": "pal = []\nfor i in range(100, 1000):\n for j in range(100, 1000):\n s = str(i * j)\n if s[::-1] == s:\n pal.append(int(s))\nprint(max(pal))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os, sys, shutil
import fnmatch, logging, zipfile
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)
def scan_files(dir, pattern):
fileList = []
for root, subFolders, files in os.walk(dir):
for file in files:
if fnmatch.fnmatch(file, pattern):
fileList.append(os.path.join(root,file))
return fileList
if (not os.path.exists('dist')):
os.makedirs('dist')
currentDir = os.getcwd() # save current dir
os.chdir('..\\..') # go to root of simulation
distPath = os.path.join(currentDir, 'bundle') # where to put files
scanData = [
['WSN\\simulations', '*.ned', '', True],
['WSN\\simulations', '*.xml', '', True],
['WSN\\simulations', '*.exe', '', True],
['WSN\\simulations', '*.ini', '', True],
['WSN\\src', '*.ned', '', True],
['WSN\\src', '*.dll', '', True],
['MiXiM\\src', '*.ned', '', True],
['MiXiM\\src', '*.dll', '', True],
['MiXiM\\src\\base', '*.dll', 'lib', False],
['MiXiM\\src\\modules', '*.dll', 'lib', False],
[os.path.join(currentDir, 'lib'), '*.dll', 'lib', False],
]
# remove old bundle
if (os.path.exists(distPath)):
shutil.rmtree(distPath)
# copy neccessary files
for data in scanData:
for file in scan_files(data[0], data[1]):
if (data[3]):
newSubPath = file
else:
newSubPath = os.path.basename(file)
newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))
newDir = os.path.dirname(newPath)
if (not os.path.exists(newDir)):
os.makedirs(newDir)
logging.info('Copying %s to %s' % (file, newPath))
shutil.copyfile(file, newPath)
logging.info("Creating archive")
bundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', "bundle.zip"), 'w', zipfile.ZIP_DEFLATED)
for root, subFolders, files in os.walk(distPath):
for file in files:
# make path relative to distPath
newPath = os.path.join(root, file).replace(distPath, '')
# add files to zip
bundleZip.write(os.path.join(root, file), newPath)
bundleZip.close()
logging.info("Done")
os.chdir(currentDir) # go back
|
normal
|
{
"blob_id": "187c2a56ba9360b89c8ded09861091e2deedf32e",
"index": 7783,
"step-1": "<mask token>\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\n<mask token>\nos.chdir('..\\\\..')\n<mask token>\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\n<mask token>\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\ncurrentDir = os.getcwd()\nos.chdir('..\\\\..')\ndistPath = os.path.join(currentDir, 'bundle')\nscanData = [['WSN\\\\simulations', '*.ned', '', True], ['WSN\\\\simulations',\n '*.xml', '', True], ['WSN\\\\simulations', '*.exe', '', True], [\n 'WSN\\\\simulations', '*.ini', '', True], ['WSN\\\\src', '*.ned', '', True],\n ['WSN\\\\src', '*.dll', '', True], ['MiXiM\\\\src', '*.ned', '', True], [\n 'MiXiM\\\\src', '*.dll', '', True], ['MiXiM\\\\src\\\\base', '*.dll', 'lib', \n False], ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False], [os.path.join(\n currentDir, 'lib'), '*.dll', 'lib', False]]\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', 'bundle.zip'),\n 'w', zipfile.ZIP_DEFLATED)\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-4": "import os, sys, shutil\nimport fnmatch, logging, zipfile\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\ncurrentDir = os.getcwd()\nos.chdir('..\\\\..')\ndistPath = os.path.join(currentDir, 'bundle')\nscanData = [['WSN\\\\simulations', '*.ned', '', True], ['WSN\\\\simulations',\n '*.xml', '', True], ['WSN\\\\simulations', '*.exe', '', True], [\n 'WSN\\\\simulations', '*.ini', '', True], ['WSN\\\\src', '*.ned', '', True],\n ['WSN\\\\src', '*.dll', '', True], ['MiXiM\\\\src', '*.ned', '', True], [\n 'MiXiM\\\\src', '*.dll', '', True], ['MiXiM\\\\src\\\\base', '*.dll', 'lib', \n False], ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False], [os.path.join(\n currentDir, 'lib'), '*.dll', 'lib', False]]\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', 'bundle.zip'),\n 'w', zipfile.ZIP_DEFLATED)\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-5": "import os, sys, shutil \r\nimport fnmatch, logging, zipfile\r\n\r\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\r\n\r\ndef scan_files(dir, pattern):\r\n fileList = []\r\n for root, subFolders, files in os.walk(dir):\r\n for file in files:\r\n if fnmatch.fnmatch(file, pattern):\r\n fileList.append(os.path.join(root,file))\r\n return fileList\r\n\r\nif (not os.path.exists('dist')):\r\n os.makedirs('dist')\r\n \r\ncurrentDir = os.getcwd() # save current dir\r\nos.chdir('..\\\\..') # go to root of simulation\r\n\r\ndistPath = os.path.join(currentDir, 'bundle') # where to put files\r\nscanData = [\r\n ['WSN\\\\simulations', '*.ned', '', True],\r\n ['WSN\\\\simulations', '*.xml', '', True],\r\n ['WSN\\\\simulations', '*.exe', '', True],\r\n ['WSN\\\\simulations', '*.ini', '', True],\r\n ['WSN\\\\src', '*.ned', '', True],\r\n ['WSN\\\\src', '*.dll', '', True],\r\n ['MiXiM\\\\src', '*.ned', '', True],\r\n ['MiXiM\\\\src', '*.dll', '', True],\r\n ['MiXiM\\\\src\\\\base', '*.dll', 'lib', False],\r\n ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False],\r\n [os.path.join(currentDir, 'lib'), '*.dll', 'lib', False],\r\n]\r\n\r\n# remove old bundle\r\nif (os.path.exists(distPath)):\r\n shutil.rmtree(distPath)\r\n\r\n# copy neccessary files\r\nfor data in scanData:\r\n \r\n for file in scan_files(data[0], data[1]):\r\n \r\n if (data[3]):\r\n newSubPath = file \r\n else:\r\n newSubPath = os.path.basename(file)\r\n \r\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\r\n newDir = os.path.dirname(newPath)\r\n \r\n if (not os.path.exists(newDir)):\r\n os.makedirs(newDir)\r\n \r\n logging.info('Copying %s to %s' % (file, newPath))\r\n shutil.copyfile(file, newPath)\r\n\r\nlogging.info(\"Creating archive\")\r\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', \"bundle.zip\"), 'w', zipfile.ZIP_DEFLATED)\r\nfor root, subFolders, files in os.walk(distPath):\r\n for file in 
files:\r\n # make path relative to distPath\r\n newPath = os.path.join(root, file).replace(distPath, '')\r\n # add files to zip\r\n bundleZip.write(os.path.join(root, file), newPath)\r\nbundleZip.close()\r\nlogging.info(\"Done\")\r\n\r\nos.chdir(currentDir) # go back",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from .plutotv_html import PlutoTV_HTML
class Plugin_OBJ():
def __init__(self, fhdhr, plugin_utils):
self.fhdhr = fhdhr
self.plugin_utils = plugin_utils
self.plutotv_html = PlutoTV_HTML(fhdhr, plugin_utils)
|
normal
|
{
"blob_id": "ee0cf2325c94821fa9f5115e8848c71143eabdbf",
"index": 4775,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Plugin_OBJ:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Plugin_OBJ:\n\n def __init__(self, fhdhr, plugin_utils):\n self.fhdhr = fhdhr\n self.plugin_utils = plugin_utils\n self.plutotv_html = PlutoTV_HTML(fhdhr, plugin_utils)\n",
"step-4": "from .plutotv_html import PlutoTV_HTML\n\n\nclass Plugin_OBJ:\n\n def __init__(self, fhdhr, plugin_utils):\n self.fhdhr = fhdhr\n self.plugin_utils = plugin_utils\n self.plutotv_html = PlutoTV_HTML(fhdhr, plugin_utils)\n",
"step-5": "\nfrom .plutotv_html import PlutoTV_HTML\n\n\nclass Plugin_OBJ():\n\n def __init__(self, fhdhr, plugin_utils):\n self.fhdhr = fhdhr\n self.plugin_utils = plugin_utils\n\n self.plutotv_html = PlutoTV_HTML(fhdhr, plugin_utils)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
nx.draw(g)
nx.draw(h)
plt.show()
nx.write_gexf(g, 'test.gexf')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
g = nx.Graph()
g = nx.complete_graph(10)
h = nx.gnp_random_graph(10, 0.5)
nx.draw(g)
nx.draw(h)
plt.show()
nx.write_gexf(g, 'test.gexf')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import networkx as nx
import matplotlib.pyplot as plt
g = nx.Graph()
g = nx.complete_graph(10)
h = nx.gnp_random_graph(10, 0.5)
nx.draw(g)
nx.draw(h)
plt.show()
nx.write_gexf(g, 'test.gexf')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 20:06:32 2020
@author: Supriyo
"""
import networkx as nx
import matplotlib.pyplot as plt
g=nx.Graph()
#l=[1,2,3]
# g.add_node(1)
# g.add_node(2)
# g.add_node(3)
# g.add_nodes_from(l)
# g.add_edge(1,2)
# g.add_edge(2,3)
# g.add_edge(3,1)
# print(g.nodes())
# print(g.edges())
g=nx.complete_graph(10)
h=nx.gnp_random_graph(10,0.5)#0.55 is the probability
nx.draw(g)
nx.draw(h)
plt.show()
nx.write_gexf(g,"test.gexf")
|
flexible
|
{
"blob_id": "3bfa9d42e3fd61cf6b7ffaac687f66c2f4bc073e",
"index": 3906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-3": "<mask token>\ng = nx.Graph()\ng = nx.complete_graph(10)\nh = nx.gnp_random_graph(10, 0.5)\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-4": "<mask token>\nimport networkx as nx\nimport matplotlib.pyplot as plt\ng = nx.Graph()\ng = nx.complete_graph(10)\nh = nx.gnp_random_graph(10, 0.5)\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 9 20:06:32 2020\r\n\r\n@author: Supriyo\r\n\"\"\"\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt \r\n\r\ng=nx.Graph()\r\n\r\n\r\n#l=[1,2,3]\r\n\r\n# g.add_node(1)\r\n# g.add_node(2)\r\n# g.add_node(3)\r\n # g.add_nodes_from(l)\r\n \r\n # g.add_edge(1,2)\r\n # g.add_edge(2,3)\r\n # g.add_edge(3,1)\r\n\r\n# print(g.nodes())\r\n# print(g.edges())\r\n\r\ng=nx.complete_graph(10)\r\n\r\nh=nx.gnp_random_graph(10,0.5)#0.55 is the probability\r\nnx.draw(g)\r\nnx.draw(h)\r\nplt.show()\r\n\r\nnx.write_gexf(g,\"test.gexf\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
class ScggjyList(models.Model):
title = models.CharField(max_length=255)
pubData = models.CharField(db_column='pubData', max_length=255)
detailLink = models.CharField(db_column='detailLink', max_length=255)
detailTitle = models.CharField(db_column='detailTitle', max_length=255)
class Meta:
managed = False
db_table = 'scggjy_list'
class ZakerNews(models.Model):
zTitle = models.CharField(db_column='zTitle', unique=True, max_length=255, blank=True, null=True)
zSubtitle = models.CharField(db_column='zSubtitle', max_length=255, blank=True, null=True)
sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=255, blank=True, null=True)
zDetailLink = models.CharField(db_column='zDetailLink', max_length=255, blank=True, null=True)
zType = models.CharField(db_column='zType', max_length=20, blank=True, null=True)
class Meta:
managed = False
db_table = 'zaker_news'
class ZakerNewsTab(models.Model):
code = models.IntegerField(blank=True, null=True)
tabName = models.CharField(db_column='tabName', max_length=20, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'zaker_news_tab'
class BxtZbgg(models.Model):
area = models.CharField(max_length=20, blank=True, null=True)
city = models.CharField(max_length=25, blank=True, null=True)
ywtype = models.CharField(db_column='ywType', max_length=32, blank=True, null=True) # Field name made lowercase.
xxtype = models.CharField(db_column='xxType', max_length=40, blank=True, null=True) # Field name made lowercase.
type = models.CharField(max_length=40, blank=True, null=True)
ly = models.CharField(max_length=50, blank=True, null=True)
title = models.CharField(max_length=255, blank=True, null=True)
pubdata = models.CharField(db_column='pubData', max_length=30, blank=True, null=True) # Field name made lowercase.
deaddata = models.CharField(db_column='deadData', max_length=30, blank=True, null=True) # Field name made lowercase.
status = models.CharField(max_length=20, blank=True, null=True)
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=True, null=True) # Field name made lowercase.
detailurl = models.CharField(db_column='detailUrl', unique=True, max_length=255, blank=True, null=True) # Field name made lowercase.
class Meta:
managed = False
db_table = 'bxt_zbgg'
|
normal
|
{
"blob_id": "951fafe9f1b9a3273f30d101831d1e59e26fe85d",
"index": 1535,
"step-1": "<mask token>\n\n\nclass ZakerNewsTab(models.Model):\n code = models.IntegerField(blank=True, null=True)\n tabName = models.CharField(db_column='tabName', max_length=20, blank=\n True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news_tab'\n\n\nclass BxtZbgg(models.Model):\n area = models.CharField(max_length=20, blank=True, null=True)\n city = models.CharField(max_length=25, blank=True, null=True)\n ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,\n null=True)\n xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,\n null=True)\n type = models.CharField(max_length=40, blank=True, null=True)\n ly = models.CharField(max_length=50, blank=True, null=True)\n title = models.CharField(max_length=255, blank=True, null=True)\n pubdata = models.CharField(db_column='pubData', max_length=30, blank=\n True, null=True)\n deaddata = models.CharField(db_column='deadData', max_length=30, blank=\n True, null=True)\n status = models.CharField(max_length=20, blank=True, null=True)\n itemnum = models.CharField(db_column='itemNum', max_length=100, blank=\n True, null=True)\n detailurl = models.CharField(db_column='detailUrl', unique=True,\n max_length=255, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'bxt_zbgg'\n",
"step-2": "<mask token>\n\n\nclass ScggjyList(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'scggjy_list'\n\n\nclass ZakerNews(models.Model):\n zTitle = models.CharField(db_column='zTitle', unique=True, max_length=\n 255, blank=True, null=True)\n zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,\n blank=True, null=True)\n sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=\n 255, blank=True, null=True)\n zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,\n blank=True, null=True)\n zType = models.CharField(db_column='zType', max_length=20, blank=True,\n null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news'\n\n\nclass ZakerNewsTab(models.Model):\n code = models.IntegerField(blank=True, null=True)\n tabName = models.CharField(db_column='tabName', max_length=20, blank=\n True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news_tab'\n\n\nclass BxtZbgg(models.Model):\n area = models.CharField(max_length=20, blank=True, null=True)\n city = models.CharField(max_length=25, blank=True, null=True)\n ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,\n null=True)\n xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,\n null=True)\n type = models.CharField(max_length=40, blank=True, null=True)\n ly = models.CharField(max_length=50, blank=True, null=True)\n title = models.CharField(max_length=255, blank=True, null=True)\n pubdata = models.CharField(db_column='pubData', max_length=30, blank=\n True, null=True)\n deaddata = models.CharField(db_column='deadData', max_length=30, blank=\n True, null=True)\n status = models.CharField(max_length=20, blank=True, null=True)\n itemnum = models.CharField(db_column='itemNum', max_length=100, blank=\n True, null=True)\n detailurl = models.CharField(db_column='detailUrl', unique=True,\n max_length=255, blank=True, 
null=True)\n\n\n class Meta:\n managed = False\n db_table = 'bxt_zbgg'\n",
"step-3": "<mask token>\n\n\nclass ScggjyList(models.Model):\n title = models.CharField(max_length=255)\n pubData = models.CharField(db_column='pubData', max_length=255)\n detailLink = models.CharField(db_column='detailLink', max_length=255)\n detailTitle = models.CharField(db_column='detailTitle', max_length=255)\n\n\n class Meta:\n managed = False\n db_table = 'scggjy_list'\n\n\nclass ZakerNews(models.Model):\n zTitle = models.CharField(db_column='zTitle', unique=True, max_length=\n 255, blank=True, null=True)\n zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,\n blank=True, null=True)\n sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=\n 255, blank=True, null=True)\n zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,\n blank=True, null=True)\n zType = models.CharField(db_column='zType', max_length=20, blank=True,\n null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news'\n\n\nclass ZakerNewsTab(models.Model):\n code = models.IntegerField(blank=True, null=True)\n tabName = models.CharField(db_column='tabName', max_length=20, blank=\n True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news_tab'\n\n\nclass BxtZbgg(models.Model):\n area = models.CharField(max_length=20, blank=True, null=True)\n city = models.CharField(max_length=25, blank=True, null=True)\n ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,\n null=True)\n xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,\n null=True)\n type = models.CharField(max_length=40, blank=True, null=True)\n ly = models.CharField(max_length=50, blank=True, null=True)\n title = models.CharField(max_length=255, blank=True, null=True)\n pubdata = models.CharField(db_column='pubData', max_length=30, blank=\n True, null=True)\n deaddata = models.CharField(db_column='deadData', max_length=30, blank=\n True, null=True)\n status = models.CharField(max_length=20, blank=True, null=True)\n 
itemnum = models.CharField(db_column='itemNum', max_length=100, blank=\n True, null=True)\n detailurl = models.CharField(db_column='detailUrl', unique=True,\n max_length=255, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'bxt_zbgg'\n",
"step-4": "from django.db import models\n\n\nclass ScggjyList(models.Model):\n title = models.CharField(max_length=255)\n pubData = models.CharField(db_column='pubData', max_length=255)\n detailLink = models.CharField(db_column='detailLink', max_length=255)\n detailTitle = models.CharField(db_column='detailTitle', max_length=255)\n\n\n class Meta:\n managed = False\n db_table = 'scggjy_list'\n\n\nclass ZakerNews(models.Model):\n zTitle = models.CharField(db_column='zTitle', unique=True, max_length=\n 255, blank=True, null=True)\n zSubtitle = models.CharField(db_column='zSubtitle', max_length=255,\n blank=True, null=True)\n sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=\n 255, blank=True, null=True)\n zDetailLink = models.CharField(db_column='zDetailLink', max_length=255,\n blank=True, null=True)\n zType = models.CharField(db_column='zType', max_length=20, blank=True,\n null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news'\n\n\nclass ZakerNewsTab(models.Model):\n code = models.IntegerField(blank=True, null=True)\n tabName = models.CharField(db_column='tabName', max_length=20, blank=\n True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'zaker_news_tab'\n\n\nclass BxtZbgg(models.Model):\n area = models.CharField(max_length=20, blank=True, null=True)\n city = models.CharField(max_length=25, blank=True, null=True)\n ywtype = models.CharField(db_column='ywType', max_length=32, blank=True,\n null=True)\n xxtype = models.CharField(db_column='xxType', max_length=40, blank=True,\n null=True)\n type = models.CharField(max_length=40, blank=True, null=True)\n ly = models.CharField(max_length=50, blank=True, null=True)\n title = models.CharField(max_length=255, blank=True, null=True)\n pubdata = models.CharField(db_column='pubData', max_length=30, blank=\n True, null=True)\n deaddata = models.CharField(db_column='deadData', max_length=30, blank=\n True, null=True)\n status = models.CharField(max_length=20, blank=True, 
null=True)\n itemnum = models.CharField(db_column='itemNum', max_length=100, blank=\n True, null=True)\n detailurl = models.CharField(db_column='detailUrl', unique=True,\n max_length=255, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'bxt_zbgg'\n",
"step-5": "from django.db import models\n\n\nclass ScggjyList(models.Model):\n title = models.CharField(max_length=255)\n pubData = models.CharField(db_column='pubData', max_length=255)\n detailLink = models.CharField(db_column='detailLink', max_length=255)\n detailTitle = models.CharField(db_column='detailTitle', max_length=255)\n\n class Meta:\n managed = False\n db_table = 'scggjy_list'\n\n\nclass ZakerNews(models.Model):\n zTitle = models.CharField(db_column='zTitle', unique=True, max_length=255, blank=True, null=True)\n zSubtitle = models.CharField(db_column='zSubtitle', max_length=255, blank=True, null=True)\n sSubImageLink = models.CharField(db_column='sSubImageLink', max_length=255, blank=True, null=True)\n zDetailLink = models.CharField(db_column='zDetailLink', max_length=255, blank=True, null=True)\n zType = models.CharField(db_column='zType', max_length=20, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'zaker_news'\n\nclass ZakerNewsTab(models.Model):\n code = models.IntegerField(blank=True, null=True)\n tabName = models.CharField(db_column='tabName', max_length=20, blank=True, null=True) # Field name made lowercase.\n\n class Meta:\n managed = False\n db_table = 'zaker_news_tab'\n\n\nclass BxtZbgg(models.Model):\n area = models.CharField(max_length=20, blank=True, null=True)\n city = models.CharField(max_length=25, blank=True, null=True)\n ywtype = models.CharField(db_column='ywType', max_length=32, blank=True, null=True) # Field name made lowercase.\n xxtype = models.CharField(db_column='xxType', max_length=40, blank=True, null=True) # Field name made lowercase.\n type = models.CharField(max_length=40, blank=True, null=True)\n ly = models.CharField(max_length=50, blank=True, null=True)\n title = models.CharField(max_length=255, blank=True, null=True)\n pubdata = models.CharField(db_column='pubData', max_length=30, blank=True, null=True) # Field name made lowercase.\n deaddata = models.CharField(db_column='deadData', 
max_length=30, blank=True, null=True) # Field name made lowercase.\n status = models.CharField(max_length=20, blank=True, null=True)\n itemnum = models.CharField(db_column='itemNum', max_length=100, blank=True, null=True) # Field name made lowercase.\n detailurl = models.CharField(db_column='detailUrl', unique=True, max_length=255, blank=True, null=True) # Field name made lowercase.\n\n class Meta:\n managed = False\n db_table = 'bxt_zbgg'\n\n\n\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
"""
Декоратор parser_stop - парсер результата вывода комманды docker stop.
"""
from functools import wraps
def parser_stop(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
stdout = result['stdout']
"""
stdout: строки разделены \n
"""
data = stdout.split('\n')
result['data'] = data[0]
return result
return wrapper
|
normal
|
{
"blob_id": "4af573fa17f86ee067b870dce1f6ee482d1b14ff",
"index": 8281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parser_stop(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n \"\"\"\n stdout: строки разделены \n\n \"\"\"\n data = stdout.split('\\n')\n result['data'] = data[0]\n return result\n return wrapper\n",
"step-3": "<mask token>\nfrom functools import wraps\n\n\ndef parser_stop(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n \"\"\"\n stdout: строки разделены \n\n \"\"\"\n data = stdout.split('\\n')\n result['data'] = data[0]\n return result\n return wrapper\n",
"step-4": "\"\"\"\nДекоратор parser_stop - парсер результата вывода комманды docker stop.\n\"\"\"\n\nfrom functools import wraps\n\n\ndef parser_stop(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n stdout = result['stdout']\n\n \"\"\"\n stdout: строки разделены \\n\n \"\"\"\n\n data = stdout.split('\\n')\n result['data'] = data[0]\n\n return result\n\n return wrapper\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [url('^add/$', views.cart_add, name='add'), url('^count/$',
views.cart_count, name='count'), url('^del/$', views.cart_del, name=
'delete'), url('update/$', views.cart_update, name='update'), url('^&',
views.cart_show, name='show')]
<|reserved_special_token_1|>
from django.conf.urls import url
from cart import views
urlpatterns = [url('^add/$', views.cart_add, name='add'), url('^count/$',
views.cart_count, name='count'), url('^del/$', views.cart_del, name=
'delete'), url('update/$', views.cart_update, name='update'), url('^&',
views.cart_show, name='show')]
<|reserved_special_token_1|>
from django.conf.urls import url
from cart import views
urlpatterns=[
url(r'^add/$',views.cart_add,name='add'),#t添加购物车数据
url(r'^count/$',views.cart_count,name='count'),#huo获取购物车商品数量
url(r'^del/$',views.cart_del,name='delete'),#删除购物车商品记录
url(r'update/$',views.cart_update,name='update'),#更新购物车商品数目
url(r'^&',views.cart_show,name='show'),#显示用户购物车页面
]
|
flexible
|
{
"blob_id": "5b3a6b44bd9ea80da1983d8254c73bba3e2338e1",
"index": 5166,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^add/$', views.cart_add, name='add'), url('^count/$',\n views.cart_count, name='count'), url('^del/$', views.cart_del, name=\n 'delete'), url('update/$', views.cart_update, name='update'), url('^&',\n views.cart_show, name='show')]\n",
"step-3": "from django.conf.urls import url\nfrom cart import views\nurlpatterns = [url('^add/$', views.cart_add, name='add'), url('^count/$',\n views.cart_count, name='count'), url('^del/$', views.cart_del, name=\n 'delete'), url('update/$', views.cart_update, name='update'), url('^&',\n views.cart_show, name='show')]\n",
"step-4": "from django.conf.urls import url \nfrom cart import views\n\nurlpatterns=[\n url(r'^add/$',views.cart_add,name='add'),#t添加购物车数据\n url(r'^count/$',views.cart_count,name='count'),#huo获取购物车商品数量\n url(r'^del/$',views.cart_del,name='delete'),#删除购物车商品记录\n url(r'update/$',views.cart_update,name='update'),#更新购物车商品数目\n url(r'^&',views.cart_show,name='show'),#显示用户购物车页面\n\n]\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
with open('rosalind_ba3d.txt','r') as f:
kmer_length = int(f.readline().strip())
seq = f.readline().strip()
dict = {}
for offset in range(len(seq)-kmer_length+1):
prefix = seq[offset:offset+kmer_length-1]
suffix = seq[offset+1:offset+kmer_length]
if prefix in dict:
dict[prefix].append(suffix)
else:
dict[prefix] = [suffix]
for key in sorted(dict):
print(key + " -> " + ','.join(sorted(dict[key])))
|
normal
|
{
"blob_id": "050f060bb9d3d46f8b87c9802356bd0da8f926f8",
"index": 6244,
"step-1": "<mask token>\n",
"step-2": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\n<mask token>\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n",
"step-3": "with open('rosalind_ba3d.txt', 'r') as f:\n kmer_length = int(f.readline().strip())\n seq = f.readline().strip()\ndict = {}\nfor offset in range(len(seq) - kmer_length + 1):\n prefix = seq[offset:offset + kmer_length - 1]\n suffix = seq[offset + 1:offset + kmer_length]\n if prefix in dict:\n dict[prefix].append(suffix)\n else:\n dict[prefix] = [suffix]\nfor key in sorted(dict):\n print(key + ' -> ' + ','.join(sorted(dict[key])))\n",
"step-4": "with open('rosalind_ba3d.txt','r') as f:\r\n\tkmer_length = int(f.readline().strip())\r\n\tseq = f.readline().strip()\r\n\r\ndict = {}\r\nfor offset in range(len(seq)-kmer_length+1):\r\n\tprefix = seq[offset:offset+kmer_length-1]\r\n\tsuffix = seq[offset+1:offset+kmer_length]\r\n\tif prefix in dict:\r\n\t\tdict[prefix].append(suffix)\r\n\telse:\r\n\t\tdict[prefix] = [suffix]\r\n\r\nfor key in sorted(dict):\r\n\tprint(key + \" -> \" + ','.join(sorted(dict[key])))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
<|reserved_special_token_0|>
print('-' * 20)
<|reserved_special_token_0|>
for name in names:
name = name.upper()
if 'D' in name:
print(name)
<|reserved_special_token_1|>
data = (
' Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu'
)
lst = data.split(',')
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = 'Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya'
print('-' * 20)
names = girlsdata.split(',')
for name in names:
name = name.upper()
if 'D' in name:
print(name)
<|reserved_special_token_1|>
data = " Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu"
lst = data.split(",")
for name in lst:
name = name.strip().upper()
rname = name[::-1]
if name == rname:
print(name)
girlsdata = "Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya"
# Name which start with DEE get those name
print("-"*20)
names = girlsdata.split(",")
for name in names:
name = name.upper()
if "D" in name:
print(name)
|
flexible
|
{
"blob_id": "622b388beb56eba85bbb08510c2bcea55f23da9a",
"index": 721,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n<mask token>\nprint('-' * 20)\n<mask token>\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-3": "data = (\n ' Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu'\n )\nlst = data.split(',')\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\ngirlsdata = 'Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya'\nprint('-' * 20)\nnames = girlsdata.split(',')\nfor name in names:\n name = name.upper()\n if 'D' in name:\n print(name)\n",
"step-4": "data = \" Ramya , Deepa,LIRIL ,amma, dad, Kiran, 12321 , Suresh, Jayesh, Ramesh,Balu\"\n\nlst = data.split(\",\")\n\nfor name in lst:\n name = name.strip().upper()\n rname = name[::-1]\n if name == rname:\n print(name)\n\ngirlsdata = \"Tanvi,Dhatri,Haadya,Deepthi,Deepa,Ramya\"\n# Name which start with DEE get those name\nprint(\"-\"*20)\nnames = girlsdata.split(\",\")\nfor name in names:\n name = name.upper()\n if \"D\" in name:\n print(name)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &')
os.system('xterm -e "pwd ; ./start.sh " &')
return True
<|reserved_special_token_1|>
import os
import time
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &')
os.system('xterm -e "pwd ; ./start.sh " &')
return True
<|reserved_special_token_1|>
import os
import time
#if __name__ == "__main__":
# os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml ; echo press RETURN to close this window ; read" &') # delete the echo and the read to don't stop the process and make it run quickly
# os.system('xterm -e "pwd ; ./start.sh ; echo press RETURN to close this window ; read" &')
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &') # delete the echo and the read to don't stop the process and make it run quickly
os.system('xterm -e "pwd ; ./start.sh " &')
return True
|
flexible
|
{
"blob_id": "c2cf74893c7f7515a95141bb10be6a446b45a0cc",
"index": 1447,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &')\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n",
"step-3": "import os\nimport time\n\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &')\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n",
"step-4": "import os\nimport time\n#if __name__ == \"__main__\":\n# os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml ; echo press RETURN to close this window ; read\" &') # delete the echo and the read to don't stop the process and make it run quickly\n# os.system('xterm -e \"pwd ; ./start.sh ; echo press RETURN to close this window ; read\" &')\n\ndef train_once():\n os.system('xterm -e \"pwd ; cd ~ ; torcs -r ~/quickrace.xml \" &') # delete the echo and the read to don't stop the process and make it run quickly\n os.system('xterm -e \"pwd ; ./start.sh \" &')\n return True\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def clear(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def clear(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
def get_top(self):
return self.items[len(self.items) - 1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def clear(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
def get_top(self):
return self.items[len(self.items) - 1]
def main():
s = Stack()
print(s.is_empty())
s.push(4)
s.push('dog')
print(s.get_top())
s.push(True)
print(s.size())
print(s.is_empty())
s.push(8.4)
print(s.pop())
print(s.pop())
print(s.size())
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack(object):
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def clear(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
def get_top(self):
return self.items[len(self.items) - 1]
def main():
s = Stack()
print(s.is_empty())
s.push(4)
s.push('dog')
print(s.get_top())
s.push(True)
print(s.size())
print(s.is_empty())
s.push(8.4)
print(s.pop())
print(s.pop())
print(s.size())
pass
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!usr/bin/python
# -*- coding:UTF-8 -*-
'''
Introduction:
Implementation of Stack
Created on: Oct 28, 2014
@author: ICY
'''
#-------------------------FUNCTION---------------------------#
class Stack(object):
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def clear(self):
self.items = []
def push(self,item):
self.items.append(item)
def pop(self):
return self.items.pop()
def size(self):
return len(self.items)
def get_top(self):
return self.items[len(self.items)-1]
#----------------------------SELF TEST----------------------------#
def main():
s=Stack()
print(s.is_empty())
s.push(4)
s.push('dog')
print(s.get_top())
s.push(True)
print(s.size())
print(s.is_empty())
s.push(8.4)
print(s.pop())
print(s.pop())
print(s.size())
pass
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "6fa9dfadc60108e1718c6688f07de877b0ac0afd",
"index": 5885,
"step-1": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return self.items == []\n\n def clear(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return self.items == []\n\n def clear(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n def get_top(self):\n return self.items[len(self.items) - 1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return self.items == []\n\n def clear(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n def get_top(self):\n return self.items[len(self.items) - 1]\n\n\ndef main():\n s = Stack()\n print(s.is_empty())\n s.push(4)\n s.push('dog')\n print(s.get_top())\n s.push(True)\n print(s.size())\n print(s.is_empty())\n s.push(8.4)\n print(s.pop())\n print(s.pop())\n print(s.size())\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack(object):\n\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return self.items == []\n\n def clear(self):\n self.items = []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n def get_top(self):\n return self.items[len(self.items) - 1]\n\n\ndef main():\n s = Stack()\n print(s.is_empty())\n s.push(4)\n s.push('dog')\n print(s.get_top())\n s.push(True)\n print(s.size())\n print(s.is_empty())\n s.push(8.4)\n print(s.pop())\n print(s.pop())\n print(s.size())\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!usr/bin/python\n# -*- coding:UTF-8 -*-\n\n'''\nIntroduction: \nImplementation of Stack \n\nCreated on: Oct 28, 2014\n\n@author: ICY\n'''\n\n#-------------------------FUNCTION---------------------------#\n\nclass Stack(object):\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return self.items == []\n\n def clear(self):\n self.items = []\n\n def push(self,item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n def get_top(self):\n return self.items[len(self.items)-1]\n\n\n#----------------------------SELF TEST----------------------------#\n\ndef main():\n s=Stack()\n print(s.is_empty())\n s.push(4)\n s.push('dog')\n print(s.get_top())\n s.push(True)\n print(s.size())\n print(s.is_empty())\n s.push(8.4)\n print(s.pop())\n print(s.pop())\n print(s.size())\n pass\n \nif __name__ == '__main__': \n main()\n \n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def parseSexLabel(string):
if string.startswith('male'):
return 0
if string.startswith('female'):
return 1
print('ERROR parsing sex from ' + string)
<|reserved_special_token_0|>
def parseExpLabel(string):
if string.startswith('serious'):
return 0
if string.startswith('smiling') or string.startswith('funny'):
return 1
print('ERROR parsing expression from ' + string)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseSexLabel(string):
if string.startswith('male'):
return 0
if string.startswith('female'):
return 1
print('ERROR parsing sex from ' + string)
def parseAgeLabel(string):
if string.startswith('child'):
return 0
if string.startswith('teen'):
return 1
if string.startswith('adult'):
return 2
if string.startswith('senior'):
return 3
print('ERROR parsing age from ' + string)
def parseExpLabel(string):
if string.startswith('serious'):
return 0
if string.startswith('smiling') or string.startswith('funny'):
return 1
print('ERROR parsing expression from ' + string)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseSexLabel(string):
if string.startswith('male'):
return 0
if string.startswith('female'):
return 1
print('ERROR parsing sex from ' + string)
def parseAgeLabel(string):
if string.startswith('child'):
return 0
if string.startswith('teen'):
return 1
if string.startswith('adult'):
return 2
if string.startswith('senior'):
return 3
print('ERROR parsing age from ' + string)
def parseExpLabel(string):
if string.startswith('serious'):
return 0
if string.startswith('smiling') or string.startswith('funny'):
return 1
print('ERROR parsing expression from ' + string)
<|reserved_special_token_0|>
for line in open('MITFaces/faceDR'):
numTraining += 1
<|reserved_special_token_0|>
for line in open('MITFaces/faceDR'):
parts = line.split()
trainingSexLabels[index] = parseSexLabel(parts[2])
trainingAgeLabels[index] = parseAgeLabel(parts[4])
trainingExpLabels[index] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=
dimensions) / 255.0
fileIn.close()
index += 1
<|reserved_special_token_0|>
for line in open('MITFaces/faceDS'):
numValidation += 1
<|reserved_special_token_0|>
numValidation -= numTesting
<|reserved_special_token_0|>
for line in open('MITFaces/faceDS'):
parts = line.split()
if index < numTesting:
testingSexLabels[index] = parseSexLabel(parts[2])
testingAgeLabels[index] = parseAgeLabel(parts[4])
testingExpLabels[index] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=
dimensions) / 255.0
fileIn.close()
else:
vIndex = index - numTesting
validationSexLabels[vIndex] = parseSexLabel(parts[2])
validationAgeLabels[vIndex] = parseAgeLabel(parts[4])
validationExpLabels[vIndex] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,
count=dimensions) / 255.0
fileIn.close()
index += 1
<|reserved_special_token_0|>
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.
optimizers.Adadelta(), metrics=['accuracy'])
model.fit(x_train, y_train, epochs=epochs, verbose=1)
<|reserved_special_token_0|>
print('Test loss:', score[0])
print('Test accuracy:', score[1])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parseSexLabel(string):
if string.startswith('male'):
return 0
if string.startswith('female'):
return 1
print('ERROR parsing sex from ' + string)
def parseAgeLabel(string):
if string.startswith('child'):
return 0
if string.startswith('teen'):
return 1
if string.startswith('adult'):
return 2
if string.startswith('senior'):
return 3
print('ERROR parsing age from ' + string)
def parseExpLabel(string):
if string.startswith('serious'):
return 0
if string.startswith('smiling') or string.startswith('funny'):
return 1
print('ERROR parsing expression from ' + string)
numTraining = 0
for line in open('MITFaces/faceDR'):
numTraining += 1
dimensions = 128 * 128
trainingFaces = np.zeros([numTraining, dimensions])
trainingSexLabels = np.zeros(numTraining)
trainingAgeLabels = np.zeros(numTraining)
trainingExpLabels = np.zeros(numTraining)
index = 0
for line in open('MITFaces/faceDR'):
parts = line.split()
trainingSexLabels[index] = parseSexLabel(parts[2])
trainingAgeLabels[index] = parseAgeLabel(parts[4])
trainingExpLabels[index] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=
dimensions) / 255.0
fileIn.close()
index += 1
numValidation = 0
numTesting = 0
for line in open('MITFaces/faceDS'):
numValidation += 1
numTesting = int(numValidation / 2)
numValidation -= numTesting
validationFaces = np.zeros([numValidation, dimensions])
validationSexLabels = np.zeros(numValidation)
validationAgeLabels = np.zeros(numValidation)
validationExpLabels = np.zeros(numValidation)
testingFaces = np.zeros([numTesting, dimensions])
testingSexLabels = np.zeros(numTesting)
testingAgeLabels = np.zeros(numTesting)
testingExpLabels = np.zeros(numTesting)
index = 0
for line in open('MITFaces/faceDS'):
parts = line.split()
if index < numTesting:
testingSexLabels[index] = parseSexLabel(parts[2])
testingAgeLabels[index] = parseAgeLabel(parts[4])
testingExpLabels[index] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=
dimensions) / 255.0
fileIn.close()
else:
vIndex = index - numTesting
validationSexLabels[vIndex] = parseSexLabel(parts[2])
validationAgeLabels[vIndex] = parseAgeLabel(parts[4])
validationExpLabels[vIndex] = parseExpLabel(parts[8])
fileName = 'MITFaces/rawdata/' + parts[0]
fileIn = open(fileName, 'rb')
validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,
count=dimensions) / 255.0
fileIn.close()
index += 1
<|reserved_special_token_0|>
batch_size = 128
epochs = 12
x_train = trainingFaces
y_train = trainingSexLabels
x_test = testingFaces
y_test = testingSexLabels
y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.
optimizers.Adadelta(), metrics=['accuracy'])
model.fit(x_train, y_train, epochs=epochs, verbose=1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
<|reserved_special_token_1|>
import numpy as np
# Read in training data and labels
# Some useful parsing functions
# male/female -> 0/1
def parseSexLabel(string):
if (string.startswith('male')):
return 0
if (string.startswith('female')):
return 1
print("ERROR parsing sex from " + string)
# child/teen/adult/senior -> 0/1/2/3
def parseAgeLabel(string):
if (string.startswith('child')):
return 0
if (string.startswith('teen')):
return 1
if (string.startswith('adult')):
return 2
if (string.startswith('senior')):
return 3
print("ERROR parsing age from " + string)
# serious/smiling -> 0/1
def parseExpLabel(string):
if (string.startswith('serious')):
return 0
if (string.startswith('smiling') or string.startswith('funny')):
return 1
print("ERROR parsing expression from " + string)
# Count number of training instances
numTraining = 0
for line in open ("MITFaces/faceDR"):
numTraining += 1
dimensions = 128*128
trainingFaces = np.zeros([numTraining,dimensions])
trainingSexLabels = np.zeros(numTraining) # Sex - 0 = male; 1 = female
trainingAgeLabels = np.zeros(numTraining) # Age - 0 = child; 1 = teen; 2 = male
trainingExpLabels = np.zeros(numTraining) # Expression - 0 = serious; 1 = smiling
index = 0
for line in open ("MITFaces/faceDR"):
# Parse the label data
parts = line.split()
trainingSexLabels[index] = parseSexLabel(parts[2])
trainingAgeLabels[index] = parseAgeLabel(parts[4])
trainingExpLabels[index] = parseExpLabel(parts[8])
# Read in the face
fileName = "MITFaces/rawdata/" + parts[0]
fileIn = open(fileName, 'rb')
trainingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0
fileIn.close()
# And move along
index += 1
# Count number of validation/testing instances
numValidation = 0
numTesting = 0
# Assume they're all Validation
for line in open ("MITFaces/faceDS"):
numValidation += 1
# And make half of them testing
numTesting = int(numValidation/2)
numValidation -= numTesting
validationFaces = np.zeros([numValidation,dimensions])
validationSexLabels = np.zeros(numValidation) # Sex - 0 = male; 1 = female
validationAgeLabels = np.zeros(numValidation) # Age - 0 = child; 1 = teen; 2 = male
validationExpLabels = np.zeros(numValidation) # Expression - 0 = serious; 1 = smiling
testingFaces = np.zeros([numTesting,dimensions])
testingSexLabels = np.zeros(numTesting) # Sex - 0 = male; 1 = female
testingAgeLabels = np.zeros(numTesting) # Age - 0 = child; 1 = teen; 2 = male
testingExpLabels = np.zeros(numTesting) # Expression - 0 = serious; 1 = smiling
index = 0
for line in open ("MITFaces/faceDS"):
# Parse the label data
parts = line.split()
if (index < numTesting):
testingSexLabels[index] = parseSexLabel(parts[2])
testingAgeLabels[index] = parseAgeLabel(parts[4])
testingExpLabels[index] = parseExpLabel(parts[8])
# Read in the face
fileName = "MITFaces/rawdata/" + parts[0]
fileIn = open(fileName, 'rb')
testingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0
fileIn.close()
else:
vIndex = index - numTesting
validationSexLabels[vIndex] = parseSexLabel(parts[2])
validationAgeLabels[vIndex] = parseAgeLabel(parts[4])
validationExpLabels[vIndex] = parseExpLabel(parts[8])
# Read in the face
fileName = "MITFaces/rawdata/" + parts[0]
fileIn = open(fileName, 'rb')
validationFaces[vIndex,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0
fileIn.close()
# And move along
index += 1
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
import tensorflow as tf
from tensorflow import keras
batch_size = 128
epochs = 12
x_train = trainingFaces
y_train = trainingSexLabels
x_test = testingFaces
y_test = testingSexLabels
y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, activation='relu'))
model.add(keras.layers.Conv2D(16, kernel_size=(1,1),activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=epochs,
verbose=1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
flexible
|
{
"blob_id": "6822a0a194e8b401fecfed2b617ddd5489302389",
"index": 4718,
"step-1": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\n<mask token>\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\nfor line in open('MITFaces/faceDR'):\n numTraining += 1\n<mask token>\nfor line in open('MITFaces/faceDR'):\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nfor line in open('MITFaces/faceDS'):\n numValidation += 1\n<mask token>\nnumValidation -= numTesting\n<mask token>\nfor line in open('MITFaces/faceDS'):\n parts = line.split()\n if index < numTesting:\n testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n 
validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,\n count=dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.\n optimizers.Adadelta(), metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=epochs, verbose=1)\n<mask token>\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n",
"step-4": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\nnumTraining = 0\nfor line in open('MITFaces/faceDR'):\n numTraining += 1\ndimensions = 128 * 128\ntrainingFaces = np.zeros([numTraining, dimensions])\ntrainingSexLabels = np.zeros(numTraining)\ntrainingAgeLabels = np.zeros(numTraining)\ntrainingExpLabels = np.zeros(numTraining)\nindex = 0\nfor line in open('MITFaces/faceDR'):\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n index += 1\nnumValidation = 0\nnumTesting = 0\nfor line in open('MITFaces/faceDS'):\n numValidation += 1\nnumTesting = int(numValidation / 2)\nnumValidation -= numTesting\nvalidationFaces = np.zeros([numValidation, dimensions])\nvalidationSexLabels = np.zeros(numValidation)\nvalidationAgeLabels = np.zeros(numValidation)\nvalidationExpLabels = np.zeros(numValidation)\ntestingFaces = np.zeros([numTesting, dimensions])\ntestingSexLabels = np.zeros(numTesting)\ntestingAgeLabels = np.zeros(numTesting)\ntestingExpLabels = np.zeros(numTesting)\nindex = 0\nfor line in open('MITFaces/faceDS'):\n parts = line.split()\n if index < numTesting:\n 
testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,\n count=dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nbatch_size = 128\nepochs = 12\nx_train = trainingFaces\ny_train = trainingSexLabels\nx_test = testingFaces\ny_test = testingSexLabels\ny_train = keras.utils.to_categorical(y_train, num_classes=2)\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.\n optimizers.Adadelta(), metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=epochs, verbose=1)\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n",
"step-5": "import numpy as np\n\n# Read in training data and labels\n\n# Some useful parsing functions\n\n# male/female -> 0/1\ndef parseSexLabel(string):\n if (string.startswith('male')):\n return 0\n if (string.startswith('female')):\n return 1\n print(\"ERROR parsing sex from \" + string)\n\n# child/teen/adult/senior -> 0/1/2/3\ndef parseAgeLabel(string):\n if (string.startswith('child')):\n return 0\n if (string.startswith('teen')):\n return 1\n if (string.startswith('adult')):\n return 2\n if (string.startswith('senior')):\n return 3\n print(\"ERROR parsing age from \" + string)\n\n# serious/smiling -> 0/1\ndef parseExpLabel(string):\n if (string.startswith('serious')):\n return 0\n if (string.startswith('smiling') or string.startswith('funny')):\n return 1\n print(\"ERROR parsing expression from \" + string)\n\n# Count number of training instances\n\nnumTraining = 0\n\nfor line in open (\"MITFaces/faceDR\"):\n numTraining += 1\n\ndimensions = 128*128\n\ntrainingFaces = np.zeros([numTraining,dimensions])\ntrainingSexLabels = np.zeros(numTraining) # Sex - 0 = male; 1 = female\ntrainingAgeLabels = np.zeros(numTraining) # Age - 0 = child; 1 = teen; 2 = male \ntrainingExpLabels = np.zeros(numTraining) # Expression - 0 = serious; 1 = smiling\n\nindex = 0\nfor line in open (\"MITFaces/faceDR\"):\n # Parse the label data\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n # And move along\n index += 1\n\n# Count number of validation/testing instances\n\nnumValidation = 0\nnumTesting = 0\n\n# Assume they're all Validation\nfor line in open (\"MITFaces/faceDS\"):\n numValidation += 1\n\n# And make half of them testing\nnumTesting = 
int(numValidation/2)\nnumValidation -= numTesting\n\nvalidationFaces = np.zeros([numValidation,dimensions])\nvalidationSexLabels = np.zeros(numValidation) # Sex - 0 = male; 1 = female\nvalidationAgeLabels = np.zeros(numValidation) # Age - 0 = child; 1 = teen; 2 = male \nvalidationExpLabels = np.zeros(numValidation) # Expression - 0 = serious; 1 = smiling\n\ntestingFaces = np.zeros([numTesting,dimensions])\ntestingSexLabels = np.zeros(numTesting) # Sex - 0 = male; 1 = female\ntestingAgeLabels = np.zeros(numTesting) # Age - 0 = child; 1 = teen; 2 = male \ntestingExpLabels = np.zeros(numTesting) # Expression - 0 = serious; 1 = smiling\n\nindex = 0\nfor line in open (\"MITFaces/faceDS\"):\n # Parse the label data\n parts = line.split()\n\n if (index < numTesting):\n testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n validationFaces[vIndex,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n \n # And move along\n index += 1\n\n\n\n\n\n\n\n\n\n\n'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nbatch_size = 128\nepochs = 12\n\nx_train = trainingFaces\ny_train = trainingSexLabels\nx_test = testingFaces\ny_test = testingSexLabels\n\ny_train 
= keras.utils.to_categorical(y_train, num_classes=2)\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1,1),activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n epochs=epochs,\n verbose=1)\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
"""Step (with Warm up) learning rate scheduler module."""
from typing import Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
from typeguard import check_argument_types
from espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler
class WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):
    """Linear warmup followed by StepLR-style epoch decay.

    Per optimizer step the learning rate is:

        step <= warmup_steps:
            lr = base_lr * step / warmup_steps      (linear ramp up)
        step >  warmup_steps:
            lr = base_lr * gamma ** ((epoch - warmup_epoch) // step_size)

    The peak learning rate therefore equals the lr configured on the
    optimizer.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        # warmup phase
        warmup_steps: Union[int, float] = 25000,
        # decay phase (StepLR)
        steps_per_epoch: int = 10000,
        step_size: int = 1,
        gamma: float = 0.1,
        last_epoch: int = -1,
    ):
        assert check_argument_types()
        # Warmup bookkeeping.
        self.warmup_steps = warmup_steps
        self.step_num = 0
        self.epoch_num = 0
        # NOTE: steps_per_epoch must be kept in sync with the effective
        # batch_size/ngpu/num_nodes configuration; for the exact number of
        # iterations per epoch see
        # https://github.com/espnet/espnet/discussions/4404
        self.steps_per_epoch = steps_per_epoch
        self.warmup_epoch = warmup_steps // steps_per_epoch
        self.lr_scale = warmup_steps**-1
        # Decay bookkeeping: multiply lr by `gamma` every `step_size` epochs.
        self.step_size = step_size
        self.gamma = gamma
        # The parent __init__ invokes step() once, so every field it reads
        # must already be initialized before this call.
        super().__init__(optimizer, last_epoch)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(warmup_steps={self.warmup_steps}, "
            f"steps_per_epoch={self.steps_per_epoch}, "
            f"step_size={self.step_size}, gamma={self.gamma})"
        )

    def get_lr(self):
        # Called once per optimizer step by the base scheduler.
        self.step_num += 1
        if self.step_num % self.steps_per_epoch == 0:
            self.epoch_num += 1
        if self.step_num <= self.warmup_steps:
            # Linear ramp: base_lr * step / warmup_steps.
            return [base * self.lr_scale * self.step_num for base in self.base_lrs]
        # Post-warmup: step decay counted in epochs past the warmup phase.
        decay = self.gamma ** ((self.epoch_num - self.warmup_epoch) // self.step_size)
        return [base * decay for base in self.base_lrs]
|
normal
|
{
"blob_id": "bce16762c0739087a8309872da4ac04298c50893",
"index": 7695,
"step-1": "<mask token>\n\n\nclass WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):\n <mask token>\n <mask token>\n\n def __repr__(self):\n return (\n f'{self.__class__.__name__}(warmup_steps={self.warmup_steps}, steps_per_epoch={self.steps_per_epoch}, step_size={self.step_size}, gamma={self.gamma})'\n )\n\n def get_lr(self):\n self.step_num += 1\n if self.step_num % self.steps_per_epoch == 0:\n self.epoch_num += 1\n if self.step_num <= self.warmup_steps:\n return [(lr * self.lr_scale * self.step_num) for lr in self.\n base_lrs]\n else:\n return [(lr * self.gamma ** ((self.epoch_num - self.\n warmup_epoch) // self.step_size)) for lr in self.base_lrs]\n",
"step-2": "<mask token>\n\n\nclass WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):\n <mask token>\n\n def __init__(self, optimizer: torch.optim.Optimizer, warmup_steps:\n Union[int, float]=25000, steps_per_epoch: int=10000, step_size: int\n =1, gamma: float=0.1, last_epoch: int=-1):\n assert check_argument_types()\n self.warmup_steps = warmup_steps\n self.step_num = 0\n self.epoch_num = 0\n self.steps_per_epoch = steps_per_epoch\n self.warmup_epoch = warmup_steps // steps_per_epoch\n self.lr_scale = warmup_steps ** -1\n self.step_size = step_size\n self.gamma = gamma\n super().__init__(optimizer, last_epoch)\n\n def __repr__(self):\n return (\n f'{self.__class__.__name__}(warmup_steps={self.warmup_steps}, steps_per_epoch={self.steps_per_epoch}, step_size={self.step_size}, gamma={self.gamma})'\n )\n\n def get_lr(self):\n self.step_num += 1\n if self.step_num % self.steps_per_epoch == 0:\n self.epoch_num += 1\n if self.step_num <= self.warmup_steps:\n return [(lr * self.lr_scale * self.step_num) for lr in self.\n base_lrs]\n else:\n return [(lr * self.gamma ** ((self.epoch_num - self.\n warmup_epoch) // self.step_size)) for lr in self.base_lrs]\n",
"step-3": "<mask token>\n\n\nclass WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):\n \"\"\"The WarmupStepLR scheduler.\n\n This scheduler is the combination of WarmupLR and StepLR:\n\n WarmupLR:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n WarmupStepLR:\n if step <= warmup_step:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n else:\n lr = optimizer.lr * (gamma ** (epoch//step_size))\n\n Note that the maximum lr equals to optimizer.lr in this scheduler.\n\n \"\"\"\n\n def __init__(self, optimizer: torch.optim.Optimizer, warmup_steps:\n Union[int, float]=25000, steps_per_epoch: int=10000, step_size: int\n =1, gamma: float=0.1, last_epoch: int=-1):\n assert check_argument_types()\n self.warmup_steps = warmup_steps\n self.step_num = 0\n self.epoch_num = 0\n self.steps_per_epoch = steps_per_epoch\n self.warmup_epoch = warmup_steps // steps_per_epoch\n self.lr_scale = warmup_steps ** -1\n self.step_size = step_size\n self.gamma = gamma\n super().__init__(optimizer, last_epoch)\n\n def __repr__(self):\n return (\n f'{self.__class__.__name__}(warmup_steps={self.warmup_steps}, steps_per_epoch={self.steps_per_epoch}, step_size={self.step_size}, gamma={self.gamma})'\n )\n\n def get_lr(self):\n self.step_num += 1\n if self.step_num % self.steps_per_epoch == 0:\n self.epoch_num += 1\n if self.step_num <= self.warmup_steps:\n return [(lr * self.lr_scale * self.step_num) for lr in self.\n base_lrs]\n else:\n return [(lr * self.gamma ** ((self.epoch_num - self.\n warmup_epoch) // self.step_size)) for lr in self.base_lrs]\n",
"step-4": "<mask token>\nfrom typing import Union\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom typeguard import check_argument_types\nfrom espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler\n\n\nclass WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):\n \"\"\"The WarmupStepLR scheduler.\n\n This scheduler is the combination of WarmupLR and StepLR:\n\n WarmupLR:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n WarmupStepLR:\n if step <= warmup_step:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n else:\n lr = optimizer.lr * (gamma ** (epoch//step_size))\n\n Note that the maximum lr equals to optimizer.lr in this scheduler.\n\n \"\"\"\n\n def __init__(self, optimizer: torch.optim.Optimizer, warmup_steps:\n Union[int, float]=25000, steps_per_epoch: int=10000, step_size: int\n =1, gamma: float=0.1, last_epoch: int=-1):\n assert check_argument_types()\n self.warmup_steps = warmup_steps\n self.step_num = 0\n self.epoch_num = 0\n self.steps_per_epoch = steps_per_epoch\n self.warmup_epoch = warmup_steps // steps_per_epoch\n self.lr_scale = warmup_steps ** -1\n self.step_size = step_size\n self.gamma = gamma\n super().__init__(optimizer, last_epoch)\n\n def __repr__(self):\n return (\n f'{self.__class__.__name__}(warmup_steps={self.warmup_steps}, steps_per_epoch={self.steps_per_epoch}, step_size={self.step_size}, gamma={self.gamma})'\n )\n\n def get_lr(self):\n self.step_num += 1\n if self.step_num % self.steps_per_epoch == 0:\n self.epoch_num += 1\n if self.step_num <= self.warmup_steps:\n return [(lr * self.lr_scale * self.step_num) for lr in self.\n base_lrs]\n else:\n return [(lr * self.gamma ** ((self.epoch_num - self.\n warmup_epoch) // self.step_size)) for lr in self.base_lrs]\n",
"step-5": "\"\"\"Step (with Warm up) learning rate scheduler module.\"\"\"\nfrom typing import Union\n\nimport torch\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom typeguard import check_argument_types\n\nfrom espnet2.schedulers.abs_scheduler import AbsBatchStepScheduler\n\n\nclass WarmupStepLR(_LRScheduler, AbsBatchStepScheduler):\n \"\"\"The WarmupStepLR scheduler.\n\n This scheduler is the combination of WarmupLR and StepLR:\n\n WarmupLR:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n WarmupStepLR:\n if step <= warmup_step:\n lr = optimizer.lr * warmup_step ** 0.5\n * min(step ** -0.5, step * warmup_step ** -1.5)\n else:\n lr = optimizer.lr * (gamma ** (epoch//step_size))\n\n Note that the maximum lr equals to optimizer.lr in this scheduler.\n\n \"\"\"\n\n def __init__(\n self,\n optimizer: torch.optim.Optimizer,\n # for WarmupLR\n warmup_steps: Union[int, float] = 25000,\n # for StepLR\n steps_per_epoch: int = 10000,\n step_size: int = 1,\n gamma: float = 0.1,\n last_epoch: int = -1,\n ):\n assert check_argument_types()\n self.warmup_steps = warmup_steps\n\n self.step_num = 0\n self.epoch_num = 0\n # NOTE: This number should be adjusted accordingly\n # once batch_size/ngpu/num_nodes is changed.\n # To get the exact number of iterations per epoch, refer to\n # https://github.com/espnet/espnet/discussions/4404\n self.steps_per_epoch = steps_per_epoch\n self.warmup_epoch = warmup_steps // steps_per_epoch\n\n self.lr_scale = warmup_steps**-1\n\n # after warmup_steps, decrease lr by `gamma` every `step_size` epochs\n self.step_size = step_size\n self.gamma = gamma\n\n # __init__() must be invoked before setting field\n # because step() is also invoked in __init__()\n super().__init__(optimizer, last_epoch)\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(warmup_steps={self.warmup_steps}, \"\n f\"steps_per_epoch={self.steps_per_epoch},\"\n f\" step_size={self.step_size}, gamma={self.gamma})\"\n 
)\n\n def get_lr(self):\n self.step_num += 1\n if self.step_num % self.steps_per_epoch == 0:\n self.epoch_num += 1\n\n if self.step_num <= self.warmup_steps:\n return [lr * self.lr_scale * self.step_num for lr in self.base_lrs]\n else:\n return [\n lr\n * self.gamma ** ((self.epoch_num - self.warmup_epoch) // self.step_size)\n for lr in self.base_lrs\n ]\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(k):
l, r = map(int, input().split())
k_list.append([l, r])
<|reserved_special_token_0|>
for i in range(1, n):
dpsum[i] = dp[i] + dpsum[i - 1]
for j in range(k):
l, r = k_list[j]
li = i + l
ri = i + r + 1
if li <= n:
dp[li] += dpsum[i]
dp[li] = dp[li] % 998244353
if ri <= n:
dp[ri] -= dpsum[i]
dp[ri] = dp[ri] % 998244353
print(dp[n])
<|reserved_special_token_1|>
# NOTE(review): appears to solve a "leaping" counting problem (e.g. AtCoder
# ABC179 D): starting on cell 1, a move jumps forward by any amount inside one
# of k inclusive ranges [l, r]; count ways to land exactly on cell n, modulo
# 998244353.  Inferred from the code -- no problem statement available here.
n, k = map(int, input().split())
k_list = []
# Read the k jump ranges [l, r] (both inclusive) from stdin.
for i in range(k):
    l, r = map(int, input().split())
    k_list.append([l, r])
# dp[j]    -- number of ways to reach cell j, accumulated via range updates
#             (difference trick: +dpsum at range start, -dpsum one past end).
# dpsum[j] -- prefix sum dp[1] + ... + dp[j], so a whole range of sources can
#             be added with two array writes instead of a loop.
dp = [0] * (n + 1)
dp[1] = 1
dpsum = [0] * (n + 1)
dpsum[1] = 1
for i in range(1, n):
    # Finalize the prefix sum up to i; all contributions to dp[i] come from
    # positions < i (assumes every l >= 1 -- TODO confirm from constraints).
    dpsum[i] = dp[i] + dpsum[i - 1]
    for j in range(k):
        l, r = k_list[j]
        # Range of targets reachable from sources <= i via this [l, r] range.
        li = i + l
        ri = i + r + 1
        if li <= n:
            dp[li] += dpsum[i]
            dp[li] = dp[li] % 998244353
        if ri <= n:
            dp[ri] -= dpsum[i]
            dp[ri] = dp[ri] % 998244353
print(dp[n])
|
flexible
|
{
"blob_id": "97720baab961d50ceae832d52350b9871c552c84",
"index": 9071,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(k):\n l, r = map(int, input().split())\n k_list.append([l, r])\n<mask token>\nfor i in range(1, n):\n dpsum[i] = dp[i] + dpsum[i - 1]\n for j in range(k):\n l, r = k_list[j]\n li = i + l\n ri = i + r + 1\n if li <= n:\n dp[li] += dpsum[i]\n dp[li] = dp[li] % 998244353\n if ri <= n:\n dp[ri] -= dpsum[i]\n dp[ri] = dp[ri] % 998244353\nprint(dp[n])\n",
"step-3": "n, k = map(int, input().split())\nk_list = []\nfor i in range(k):\n l, r = map(int, input().split())\n k_list.append([l, r])\ndp = [0] * (n + 1)\ndp[1] = 1\ndpsum = [0] * (n + 1)\ndpsum[1] = 1\nfor i in range(1, n):\n dpsum[i] = dp[i] + dpsum[i - 1]\n for j in range(k):\n l, r = k_list[j]\n li = i + l\n ri = i + r + 1\n if li <= n:\n dp[li] += dpsum[i]\n dp[li] = dp[li] % 998244353\n if ri <= n:\n dp[ri] -= dpsum[i]\n dp[ri] = dp[ri] % 998244353\nprint(dp[n])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""TcEx Framework Key Value Redis Module"""
class KeyValueRedis:
    """Hash-based key/value storage backed by a Redis client.

    All operations target a single Redis hash, identified by *context*.

    Args:
        context (str): Name of the Redis hash used for all operations.
        redis_client (redis.Client): A connected Redis client instance.
    """

    def __init__(self, context, redis_client):
        """Store the hash name and the backing client."""
        self._redis_client = redis_client
        self._context = context

    @property
    def context(self):
        """str: The Redis hash currently targeted by this instance."""
        return self._context

    @context.setter
    def context(self, context):
        """Point this instance at a different Redis hash."""
        self._context = context

    def create(self, key, value):
        """Store *value* under field *key* in the current hash.

        Args:
            key (str): Field name within the hash.
            value (any): Value to store.

        Returns:
            str: The Redis client's response.
        """
        return self._redis_client.hset(self.context, key, value)

    def delete(self, key):
        """Remove field *key* from the current hash (alias for hdel).

        Args:
            key (str): Field name within the hash.

        Returns:
            str: The Redis client's response.
        """
        return self._redis_client.hdel(self.context, key)

    def hgetall(self):
        """Fetch every field/value pair stored in the current hash.

        Returns:
            list: The Redis client's response.
        """
        return self._redis_client.hgetall(self.context)

    def read(self, key):
        """Fetch the value stored under field *key*.

        Returns:
            str: The stored value, decoded to ``str`` when Redis returns
            ``bytes``.
        """
        raw = self._redis_client.hget(self.context, key)
        # redis-py returns bytes by default; normalize for callers.
        return raw.decode('utf-8') if isinstance(raw, bytes) else raw
|
normal
|
{
"blob_id": "a5b74c31aed103b55404afc538af60c3eb18cb1b",
"index": 9738,
"step-1": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n <mask token>\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n <mask token>\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass KeyValueRedis:\n <mask token>\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-4": "<mask token>\n\n\nclass KeyValueRedis:\n \"\"\"TcEx Key Value Redis Module.\n\n Args:\n context (str): The Redis context (hash) for hashed based operations.\n redis_client (redis.Client): An instance of redis client.\n \"\"\"\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-5": "\"\"\"TcEx Framework Key Value Redis Module\"\"\"\n\n\nclass KeyValueRedis:\n \"\"\"TcEx Key Value Redis Module.\n\n Args:\n context (str): The Redis context (hash) for hashed based operations.\n redis_client (redis.Client): An instance of redis client.\n \"\"\"\n\n def __init__(self, context, redis_client):\n \"\"\"Initialize the Class properties.\"\"\"\n self._context = context\n self._redis_client = redis_client\n\n @property\n def context(self):\n \"\"\"Return the current context.\"\"\"\n return self._context\n\n @context.setter\n def context(self, context):\n \"\"\"Set or update the current context.\"\"\"\n self._context = context\n\n def create(self, key, value):\n \"\"\"Create key/value pair in Redis.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n value (any): The value for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hset(self.context, key, value)\n\n def delete(self, key):\n \"\"\"Alias for hdel method.\n\n Args:\n key (str): The field name (key) for the kv pair in Redis.\n\n Returns:\n str: The response from Redis.\n \"\"\"\n return self._redis_client.hdel(self.context, key)\n\n def hgetall(self):\n \"\"\"Read data from Redis for the current context.\n\n Returns:\n list: The response data from Redis.\n \"\"\"\n return self._redis_client.hgetall(self.context)\n\n def read(self, key):\n \"\"\"Read data from Redis for the provided key.\n\n Returns:\n str: The response data from Redis.\n \"\"\"\n value = self._redis_client.hget(self.context, key)\n # convert retrieved bytes to string\n if isinstance(value, bytes):\n value = value.decode('utf-8')\n return value\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('C:\\some\name')
print('C:\\some\\name')
print('C:\\some\\name')
<|reserved_special_token_0|>
print(s)
<|reserved_special_token_0|>
print(s)
<|reserved_special_token_1|>
x = '我是一个字符串'
y = '我也是一个字符串'
z = '我还是一个字符串'
s = "Yes,he doesn't"
print('C:\\some\name')
print('C:\\some\\name')
print('C:\\some\\name')
s = 'abcdefg'
print(s)
s = """
Hello I am fine!
Thinks.
"""
print(s)
<|reserved_special_token_1|>
x = '我是一个字符串'
y = "我也是一个字符串"
z = """我还是一个字符串"""
# A str can be enclosed in single quotes (' ') or double quotes (" ").
# Use a backslash (\) to escape special characters.
s = 'Yes,he doesn\'t'
# If you don't want backslashes to be treated as escapes,
# prefix the string with r to mark it as a raw string.
print('C:\some\name')
print('C:\\some\\name')
print(r'C:\some\name')
# A backslash can act as a line-continuation marker: the next line continues this one.
s = "abcd\
efg"
print(s)
# You can also use """...""" or '''...''' to span multiple lines.
s = """
Hello I am fine!
Thinks.
"""
print(s)
|
flexible
|
{
"blob_id": "8fe9d21bb65b795a6633ab390f7f5d24a90146d5",
"index": 6774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('C:\\\\some\\name')\nprint('C:\\\\some\\\\name')\nprint('C:\\\\some\\\\name')\n<mask token>\nprint(s)\n<mask token>\nprint(s)\n",
"step-3": "x = '我是一个字符串'\ny = '我也是一个字符串'\nz = '我还是一个字符串'\ns = \"Yes,he doesn't\"\nprint('C:\\\\some\\name')\nprint('C:\\\\some\\\\name')\nprint('C:\\\\some\\\\name')\ns = 'abcdefg'\nprint(s)\ns = \"\"\"\nHello I am fine!\nThinks.\n\"\"\"\nprint(s)\n",
"step-4": "x = '我是一个字符串'\r\ny = \"我也是一个字符串\"\r\nz = \"\"\"我还是一个字符串\"\"\"\r\n\r\n\r\n#字符串str用单引号(' ')或双引号(\" \")括起来\r\n\r\n#使用反斜杠(\\)转义特殊字符。\r\ns = 'Yes,he doesn\\'t'\r\n\r\n#如果你不想让反斜杠发生转义,\r\n#可以在字符串前面添加一个r,表示原始字符串\r\nprint('C:\\some\\name')\r\n\r\nprint('C:\\\\some\\\\name')\r\n\r\nprint(r'C:\\some\\name')\r\n\r\n#反斜杠可以作为续行符,表示下一行是上一行的延续。\r\ns = \"abcd\\\r\nefg\"\r\nprint(s)\r\n\r\n#还可以使用\"\"\"...\"\"\"或者'''...'''跨越多行\r\n\r\ns = \"\"\"\r\nHello I am fine!\r\nThinks.\r\n\"\"\"\r\nprint(s)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def gen(task_id):
while True:
print('Thread runned ' + str(task_id))
img = Cameras[task_id].getImg()
ret, jpeg = cv2.imencode('.jpg', img)
frame = jpeg.tobytes()
yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
return Response(gen(task_id), mimetype=
'multipart/x-mixed-replace; boundary=frame')
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
login = category = request.args.get('login')
password = content_id = request.args.get('password')
print(login)
print(password)
if login == Login and password == Password:
return render_template('CamerasPreview.html', name='Kamil',
camerasCount=len(Cameras))
else:
abort(401)
@restAppi.route('/')
def index():
return render_template('index.html')
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Total: %d MiB' % (total // 2 ** 20))
print('Used: %d MiB' % (used // 2 ** 20))
print('Free: %d MiB' % (free // 2 ** 20))
<|reserved_special_token_0|>
print(devices)
<|reserved_special_token_0|>
for device in devices:
Cameras.append(Camera(i))
i = i + 1
time.sleep(1)
print('Ilość kamer: ' + str(len(Cameras)))
<|reserved_special_token_0|>
def gen(task_id):
while True:
print('Thread runned ' + str(task_id))
img = Cameras[task_id].getImg()
ret, jpeg = cv2.imencode('.jpg', img)
frame = jpeg.tobytes()
yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
return Response(gen(task_id), mimetype=
'multipart/x-mixed-replace; boundary=frame')
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
login = category = request.args.get('login')
password = content_id = request.args.get('password')
print(login)
print(password)
if login == Login and password == Password:
return render_template('CamerasPreview.html', name='Kamil',
camerasCount=len(Cameras))
else:
abort(401)
@restAppi.route('/')
def index():
return render_template('index.html')
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
<|reserved_special_token_0|>
if __name__ == '__main__':
threading.Thread(target=restAppi.run(debug=False)).start()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
total, used, free = shutil.disk_usage('/')
print('Total: %d MiB' % (total // 2 ** 20))
print('Used: %d MiB' % (used // 2 ** 20))
print('Free: %d MiB' % (free // 2 ** 20))
<|reserved_special_token_0|>
devices = Camera.getDevicesList()
print(devices)
i = 0
Cameras = []
for device in devices:
Cameras.append(Camera(i))
i = i + 1
time.sleep(1)
print('Ilość kamer: ' + str(len(Cameras)))
<|reserved_special_token_0|>
Login = 'kamil'
Password = '123'
tasks = [{'id': 1, 'title': u'Buy groceries', 'description':
u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,
'title': u'Learn Python', 'description':
u'Need to find a good Python tutorial on the web', 'done': False}]
restAppi = Flask(__name__)
def gen(task_id):
while True:
print('Thread runned ' + str(task_id))
img = Cameras[task_id].getImg()
ret, jpeg = cv2.imencode('.jpg', img)
frame = jpeg.tobytes()
yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
return Response(gen(task_id), mimetype=
'multipart/x-mixed-replace; boundary=frame')
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
login = category = request.args.get('login')
password = content_id = request.args.get('password')
print(login)
print(password)
if login == Login and password == Password:
return render_template('CamerasPreview.html', name='Kamil',
camerasCount=len(Cameras))
else:
abort(401)
@restAppi.route('/')
def index():
return render_template('index.html')
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
restAppiRunned = False
if __name__ == '__main__':
threading.Thread(target=restAppi.run(debug=False)).start()
<|reserved_special_token_1|>
import shutil
total, used, free = shutil.disk_usage('/')
print('Total: %d MiB' % (total // 2 ** 20))
print('Used: %d MiB' % (used // 2 ** 20))
print('Free: %d MiB' % (free // 2 ** 20))
from Camera import Camera
import time
import cv2
devices = Camera.getDevicesList()
print(devices)
i = 0
Cameras = []
for device in devices:
Cameras.append(Camera(i))
i = i + 1
time.sleep(1)
print('Ilość kamer: ' + str(len(Cameras)))
import threading
from flask import render_template, Response
from flask import Flask, jsonify
from flask import abort
from flask import request
Login = 'kamil'
Password = '123'
tasks = [{'id': 1, 'title': u'Buy groceries', 'description':
u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,
'title': u'Learn Python', 'description':
u'Need to find a good Python tutorial on the web', 'done': False}]
restAppi = Flask(__name__)
def gen(task_id):
while True:
print('Thread runned ' + str(task_id))
img = Cameras[task_id].getImg()
ret, jpeg = cv2.imencode('.jpg', img)
frame = jpeg.tobytes()
yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n'
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
return Response(gen(task_id), mimetype=
'multipart/x-mixed-replace; boundary=frame')
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
login = category = request.args.get('login')
password = content_id = request.args.get('password')
print(login)
print(password)
if login == Login and password == Password:
return render_template('CamerasPreview.html', name='Kamil',
camerasCount=len(Cameras))
else:
abort(401)
@restAppi.route('/')
def index():
return render_template('index.html')
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
restAppiRunned = False
if __name__ == '__main__':
threading.Thread(target=restAppi.run(debug=False)).start()
<|reserved_special_token_1|>
import shutil
# Report disk usage of the root filesystem at startup.
total, used, free = shutil.disk_usage("/")
print("Total: %d MiB" % (total // (2**20)))
print("Used: %d MiB" % (used // (2**20)))
print("Free: %d MiB" % (free // (2**20)))
from Camera import Camera
import time
import cv2
# Enumerate capture devices and open a Camera wrapper for each index.
devices = Camera.getDevicesList()
print(devices)
i=0
Cameras = []
for device in devices:
    Cameras.append(Camera(i))
    i=i+1
# Give the devices a moment to initialise before serving frames.
time.sleep(1)
print("Ilość kamer: " + str(len(Cameras)))
import threading
### REST
from flask import render_template, Response
from flask import Flask, jsonify
from flask import abort
from flask import request
# NOTE(review): hard-coded plaintext credentials -- move to config/env.
Login = "kamil"
Password = "123"
# In-memory demo task list served by the /todo API routes below.
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]
restAppi = Flask(__name__)
def gen(task_id):
    """Yield an endless multipart (MJPEG) stream for camera *task_id*.

    Each chunk is a JPEG-encoded frame wrapped in the ``--frame`` multipart
    boundary expected by ``multipart/x-mixed-replace`` responses.
    """
    while True:
        print("Thread runned " + str(task_id))
        # Grab the newest frame from the camera and JPEG-encode it.
        image = Cameras[task_id].getImg()
        ok, encoded = cv2.imencode('.jpg', image)
        body = encoded.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + body + b'\r\n\r\n')
@restAppi.route('/video_feed/<int:task_id>')
def video_feed(task_id):
    """Expose camera *task_id* as a streaming MJPEG HTTP response."""
    return Response(
        gen(task_id),
        mimetype='multipart/x-mixed-replace; boundary=frame',
    )
@restAppi.route('/camerasPreview', methods=['GET'])
def camerasPreview():
    """Serve the camera-preview page behind a query-string credential check.

    Expects ``login`` and ``password`` GET parameters; compares them against
    the module-level ``Login``/``Password`` constants and responds 401 on
    mismatch.
    """
    # Drop the pointless `login = category = ...` chained aliases; the extra
    # names were never used.
    login = request.args.get('login')
    password = request.args.get('password')
    print(login)
    # SECURITY: do not echo the password -- the original printed the raw
    # credential to stdout/logs.
    if login == Login and password == Password:
        return render_template("CamerasPreview.html", name="Kamil",
                               camerasCount=len(Cameras))
    abort(401)
@restAppi.route('/')
def index():
    """Serve the landing page template."""
    return render_template("index.html")
@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks():
    """Return the full in-memory task list as JSON."""
    return jsonify({'tasks': tasks})
@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    """Return the task whose ``id`` equals *task_id*, or abort with 404."""
    matching = [t for t in tasks if t['id'] == task_id]
    if not matching:
        abort(404)
    return jsonify({'task': matching[0]})
restAppiRunned = False

if __name__ == "__main__":
    # BUG FIX: the original passed `restAppi.run(debug=False)` as `target`,
    # which CALLS run() immediately (blocking right here) and hands its return
    # value to Thread.  Pass the callable itself so the Flask server actually
    # runs on the worker thread.
    threading.Thread(target=restAppi.run, kwargs={"debug": False}).start()
|
flexible
|
{
"blob_id": "5cdedce5f984f53b8e26d1580a9040b26023f247",
"index": 2910,
"step-1": "<mask token>\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\n<mask token>\nprint(devices)\n<mask token>\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\n<mask token>\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\n<mask token>\nif __name__ == '__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n",
"step-3": "<mask token>\ntotal, used, free = shutil.disk_usage('/')\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\n<mask token>\ndevices = Camera.getDevicesList()\nprint(devices)\ni = 0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\n<mask token>\nLogin = 'kamil'\nPassword = '123'\ntasks = [{'id': 1, 'title': u'Buy groceries', 'description':\n u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,\n 'title': u'Learn Python', 'description':\n u'Need to find a good Python tutorial on the web', 'done': False}]\nrestAppi = Flask(__name__)\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\nrestAppiRunned = False\nif __name__ == 
'__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n",
"step-4": "import shutil\ntotal, used, free = shutil.disk_usage('/')\nprint('Total: %d MiB' % (total // 2 ** 20))\nprint('Used: %d MiB' % (used // 2 ** 20))\nprint('Free: %d MiB' % (free // 2 ** 20))\nfrom Camera import Camera\nimport time\nimport cv2\ndevices = Camera.getDevicesList()\nprint(devices)\ni = 0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i = i + 1\ntime.sleep(1)\nprint('Ilość kamer: ' + str(len(Cameras)))\nimport threading\nfrom flask import render_template, Response\nfrom flask import Flask, jsonify\nfrom flask import abort\nfrom flask import request\nLogin = 'kamil'\nPassword = '123'\ntasks = [{'id': 1, 'title': u'Buy groceries', 'description':\n u'Milk, Cheese, Pizza, Fruit, Tylenol', 'done': False}, {'id': 2,\n 'title': u'Learn Python', 'description':\n u'Need to find a good Python tutorial on the web', 'done': False}]\nrestAppi = Flask(__name__)\n\n\ndef gen(task_id):\n while True:\n print('Thread runned ' + str(task_id))\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n'\n\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n return Response(gen(task_id), mimetype=\n 'multipart/x-mixed-replace; boundary=frame')\n\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if login == Login and password == Password:\n return render_template('CamerasPreview.html', name='Kamil',\n camerasCount=len(Cameras))\n else:\n abort(401)\n\n\n@restAppi.route('/')\ndef index():\n return render_template('index.html')\n\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n 
task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\n\nrestAppiRunned = False\nif __name__ == '__main__':\n threading.Thread(target=restAppi.run(debug=False)).start()\n",
"step-5": "import shutil\n\ntotal, used, free = shutil.disk_usage(\"/\")\n\nprint(\"Total: %d MiB\" % (total // (2**20)))\nprint(\"Used: %d MiB\" % (used // (2**20)))\nprint(\"Free: %d MiB\" % (free // (2**20)))\n\n\n\nfrom Camera import Camera\nimport time\nimport cv2\n\ndevices = Camera.getDevicesList()\nprint(devices)\n\ni=0\nCameras = []\nfor device in devices:\n Cameras.append(Camera(i))\n i=i+1\ntime.sleep(1)\n\nprint(\"Ilość kamer: \" + str(len(Cameras)))\n\nimport threading\n\n### REST\nfrom flask import render_template, Response\nfrom flask import Flask, jsonify\nfrom flask import abort\nfrom flask import request\n\nLogin = \"kamil\"\nPassword = \"123\"\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Buy groceries',\n 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n 'done': False\n },\n {\n 'id': 2,\n 'title': u'Learn Python',\n 'description': u'Need to find a good Python tutorial on the web',\n 'done': False\n }\n]\n\nrestAppi = Flask(__name__)\ndef gen(task_id):\n while True:\n print(\"Thread runned \" + str(task_id))\n #get camera frame\n img = Cameras[task_id].getImg()\n ret, jpeg = cv2.imencode('.jpg', img)\n frame = jpeg.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n@restAppi.route('/video_feed/<int:task_id>')\ndef video_feed(task_id):\n #print(task_id)\n return Response(gen(task_id),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@restAppi.route('/camerasPreview', methods=['GET'])\ndef camerasPreview():\n login = category = request.args.get('login')\n password = content_id = request.args.get('password')\n print(login)\n print(password)\n if (login == Login) and password == Password:\n return render_template(\"CamerasPreview.html\", name = \"Kamil\", camerasCount = len(Cameras))\n else: abort(401);\n\n@restAppi.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@restAppi.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return 
jsonify({'tasks': tasks})\n\n@restAppi.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])\ndef get_task(task_id):\n task = [task for task in tasks if task['id'] == task_id]\n if len(task) == 0:\n abort(404)\n return jsonify({'task': task[0]})\n\nrestAppiRunned = False\nif __name__ == \"__main__\":\n threading.Thread(target=restAppi.run(debug=False)).start()\n\n#if __name__ == '__main__':\n # restAppi.run(debug=False)\n\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
<|reserved_special_token_0|>
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
if request.method == 'POST':
form = ExternalSubscriberUpload(request.POST, request.FILES)
if form.is_valid():
passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
messages.add_message(request, messages.INFO, _(
'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '
) % {'passed': passed, 'failed': failed})
return redirect('admin:newsletter_externalsubscriber_changelist')
else:
form = ExternalSubscriberUpload()
return {'form': form}
<|reserved_special_token_1|>
import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators
from utils.views import render_to
from accounts.models import Account
from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
if request.method == 'POST':
form = ExternalSubscriberUpload(request.POST, request.FILES)
if form.is_valid():
passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
messages.add_message(request, messages.INFO, _(
'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '
) % {'passed': passed, 'failed': failed})
return redirect('admin:newsletter_externalsubscriber_changelist')
else:
form = ExternalSubscriberUpload()
return {'form': form}
<|reserved_special_token_1|>
import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators
from utils.views import render_to
from accounts.models import Account
from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload
def validate_email(value, row_number):
    """Validate *value* as an e-mail address or raise Django's ValidationError.

    The error message embeds the spreadsheet row number so import failures
    point at the offending line.  Legacy Python 2 / old-Django code: relies
    on the ``unicode`` builtin and the removed ``validators.email_re``.
    """
    error_message = _(u'Invalid e-mail address on "%d" line.')
    return validators.EmailValidator(
        validators.email_re,
        unicode(error_message % row_number),
        'invalid'
    )(value)
def upload_handler(file_obj, path_to_save):
    """Stream an uploaded file to *path_to_save* chunk by chunk.

    *file_obj* must expose Django's ``chunks()`` iterator of byte strings.
    Uses a context manager so the destination handle is closed even if a
    chunk read or write raises (the original leaked the handle on error).
    """
    with open(path_to_save, 'wb+') as destination:
        for chunk in file_obj.chunks():
            destination.write(chunk)
def get_externalsubscribers(file_obj):
    """Import external subscribers from an uploaded .xls file.

    Saves the upload to a fixed temp path, opens the first worksheet and
    walks every row after the header.  Returns ``(pass_count, fail_count)``
    where *fail_count* is the number of rows whose e-mail failed validation.
    Columns: 0 = e-mail (required), 1 = first name, 2 = last name (optional).
    """
    pass_count = 0
    fail_count = 0
    # NOTE(review): fixed shared path -- concurrent imports clobber each other.
    PATH = '/tmp/import_subscribers.xls'
    upload_handler(file_obj, PATH)
    sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
    # Row 0 is skipped: treated as the header row.
    for i in range(1,sheet.nrows):
        row = sheet.row(i)
        if not row[0].value:
            continue  # empty e-mail cell: ignore the whole row
        subscriber = {}
        subscriber['email'] = row[0].value
        try:
            validate_email(subscriber['email'].strip(), i)
            pass_count+=1
        except Exception as e:
            fail_count+=1
            #print e, u'"%s"' % subscriber['email']
            continue
        # Optional name columns; shorter rows simply omit them.
        try:
            subscriber['first_name'] = row[1].value
        except IndexError:
            pass
        try:
            subscriber['last_name'] = row[2].value
        except IndexError:
            pass
        # Only track addresses that do not already belong to a registered Account.
        if not bool(Account.objects.filter(email=subscriber['email']).only('id')):
            obj, created = ExternalSubscriber.objects.get_or_create(
                email=subscriber['email'],
                defaults={
                    'first_name': subscriber.get('first_name'),
                    'last_name': subscriber.get('last_name'),
                }
            )
            if not created:
                # Existing subscriber: refresh any name field that changed.
                for field in ['first_name', 'last_name']:
                    if subscriber.get(field) and\
                        getattr(obj, field) != subscriber.get(field):
                        setattr(obj, field, subscriber.get(field))
                obj.save()
    return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
    """Admin view: upload an .xls of subscribers and import them.

    On a valid POST, imports the sheet, flashes a summary message and
    redirects to the ExternalSubscriber changelist.  Otherwise the upload
    form is re-rendered -- the ``render_to`` decorator presumably turns
    the returned context dict into a template response (confirm in
    utils.views).
    """
    if request.method == 'POST':
        form = ExternalSubscriberUpload(request.POST, request.FILES)
        if form.is_valid():
            passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
            messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed':passed, 'failed': failed})
            return redirect('admin:newsletter_externalsubscriber_changelist')
    else:
        form = ExternalSubscriberUpload()
    return {'form': form}
|
flexible
|
{
"blob_id": "2ec41e02c95a270455c096e85829b7220eeda0c7",
"index": 1317,
"step-1": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\n<mask token>\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. 
%(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n",
"step-4": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\nfrom utils.views import render_to\nfrom accounts.models import Account\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(validators.email_re, unicode(\n error_message % row_number), 'invalid')(value)\n\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1, sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count += 1\n except Exception as e:\n fail_count += 1\n continue\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n if not bool(Account.objects.filter(email=subscriber['email']).only(\n 'id')):\n obj, created = ExternalSubscriber.objects.get_or_create(email=\n subscriber['email'], defaults={'first_name': subscriber.get\n ('first_name'), 'last_name': subscriber.get('last_name')})\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and getattr(obj, field\n ) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n return pass_count, fail_count\n\n\n@render_to('newsletter/import_subscribers_form.html')\ndef import_subscribers(request):\n if request.method == 'POST':\n 
form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _(\n 'Subscribers successfuly imported. %(passed)d added and %(failed)d failed '\n ) % {'passed': passed, 'failed': failed})\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n",
"step-5": "import xlrd\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core import validators\n\nfrom utils.views import render_to\nfrom accounts.models import Account\n\nfrom .models import ExternalSubscriber\nfrom .forms import ExternalSubscriberUpload\n\ndef validate_email(value, row_number):\n error_message = _(u'Invalid e-mail address on \"%d\" line.')\n return validators.EmailValidator(\n validators.email_re,\n unicode(error_message % row_number),\n 'invalid'\n )(value)\n\ndef upload_handler(file_obj, path_to_save):\n destination = open(path_to_save, 'wb+')\n for chunk in file_obj.chunks():\n destination.write(chunk)\n destination.close()\n\ndef get_externalsubscribers(file_obj):\n pass_count = 0\n fail_count = 0\n PATH = '/tmp/import_subscribers.xls'\n upload_handler(file_obj, PATH)\n sheet = xlrd.open_workbook(PATH).sheet_by_index(0)\n for i in range(1,sheet.nrows):\n row = sheet.row(i)\n if not row[0].value:\n continue\n subscriber = {}\n subscriber['email'] = row[0].value\n try:\n validate_email(subscriber['email'].strip(), i)\n pass_count+=1\n except Exception as e:\n fail_count+=1\n #print e, u'\"%s\"' % subscriber['email']\n continue\n\n try:\n subscriber['first_name'] = row[1].value\n except IndexError:\n pass\n\n try:\n subscriber['last_name'] = row[2].value\n except IndexError:\n pass\n\n if not bool(Account.objects.filter(email=subscriber['email']).only('id')):\n obj, created = ExternalSubscriber.objects.get_or_create(\n email=subscriber['email'],\n defaults={\n 'first_name': subscriber.get('first_name'),\n 'last_name': subscriber.get('last_name'),\n }\n )\n if not created:\n for field in ['first_name', 'last_name']:\n if subscriber.get(field) and\\\n getattr(obj, field) != subscriber.get(field):\n setattr(obj, field, subscriber.get(field))\n obj.save()\n\n return pass_count, fail_count\n\n@render_to('newsletter/import_subscribers_form.html')\ndef 
import_subscribers(request):\n if request.method == 'POST':\n form = ExternalSubscriberUpload(request.POST, request.FILES)\n if form.is_valid():\n passed, failed = get_externalsubscribers(form.cleaned_data['xls'])\n messages.add_message(request, messages.INFO, _('Subscribers successfuly imported. %(passed)d added and %(failed)d failed ') % {'passed':passed, 'failed': failed})\n\n return redirect('admin:newsletter_externalsubscriber_changelist')\n else:\n form = ExternalSubscriberUpload()\n return {'form': form}\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/')
def root():
    """Serve the landing page."""
    landing = 'index.html'
    return render_template(landing)
@app.route('/api')
def index():
    """The /api root serves the same landing page as /."""
    landing = 'index.html'
    return render_template(landing)
@app.route('/api/total/counties')
def total_counties():
    """Latest totals for every county, grouped by state."""
    county_rows = read_macro('county')
    return process_counties_total(county_rows, get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
    """Latest totals for every county in *state*."""
    county_rows = read_macro('county')
    return process_state_counties_total(county_rows, state, None, get_args())
<|reserved_special_token_0|>
@app.route('/api/total/states/<state>')
def total_states_state(state):
    """Latest total for a single state."""
    country_rows = read_macro('country')
    return state_view_total(country_rows, state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
    """Latest totals for every county of *state* (alias of /total/counties/<state>)."""
    county_rows = read_macro('county')
    return process_state_counties_total(county_rows, state, None, get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
    """Latest total for one named county of *state*."""
    county_rows = read_macro('county')
    return process_state_counties_total(county_rows, state, county, get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
    """Per-date timelines for every county nationwide."""
    county_rows = read_macro('county')
    return process_country_county(county_rows, get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
    """Per-date timelines for every county of *state*."""
    county_rows = read_macro('county')
    return process_state_county(county_rows, state, None, get_args())
<|reserved_special_token_0|>
@app.route('/api/timeline/states')
def timeline_states():
    """Per-date timelines for every state."""
    country_rows = read_macro('country')
    return country_view(country_rows, get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
    """Per-date timeline for a single state."""
    country_rows = read_macro('country')
    return state_view(country_rows, state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
    """Per-date timelines for every county of *state* (alias route)."""
    county_rows = read_macro('county')
    return process_state_county(county_rows, state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
    """Per-date timeline for one named county of *state*."""
    county_rows = read_macro('county')
    return process_state_county(county_rows, state, county, get_args())
def state_view_total(data, state_filter, args):
    """Latest value for one state; integers are stringified for the response."""
    rows = filter_country_state(data, state_filter)
    newest = rows[-1]
    value = process_mode(args, newest[3], newest[4])
    if isinstance(value, int):
        return str(value)
    return value
def state_view(data, state_filter, args):
    """Build a {date: value} timeline for one state."""
    rows = filter_country_state(data, state_filter)
    # Later rows for the same date overwrite earlier ones, as in a plain loop.
    return {row[0]: process_mode(args, row[3], row[4]) for row in rows}
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
<|reserved_special_token_0|>
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4],
row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4
], row[5])
return dataset
def process_mode(args, cases, deaths):
if args['mode'] == 'cases':
return int(cases)
if args['mode'] == 'deaths':
return int(deaths)
return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
file = None
if macro == 'county':
file = 'county.csv'
elif macro == 'state':
file = 'county.csv'
elif macro == 'country':
file = 'state.csv'
if not file:
abort(500)
return file
def get_args():
return {'mode': request.args.get('mode', None), 'fips': request.args.
get('fipsKey', False)}
<|reserved_special_token_0|>
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
if locale == 'state':
key_row = 3 if args['fips'] else 1
else:
key_row = 2 if args['fips'] else 1
return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def root():
return render_template('index.html')
@app.route('/api')
def index():
return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/total/states')
def total_states():
return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
@app.route('/api/timeline/states')
def timeline_states():
return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
def state_view_total(data, state_filter, args):
data = filter_country_state(data, state_filter)
result = process_mode(args, data[-1][3], data[-1][4])
result = str(result) if isinstance(result, int) else result
return result
def state_view(data, state_filter, args):
result = {}
data = filter_country_state(data, state_filter)
for row in data:
result[row[0]] = process_mode(args, row[3], row[4])
return result
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
def country_view(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in data:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4],
row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4
], row[5])
return dataset
def process_mode(args, cases, deaths):
if args['mode'] == 'cases':
return int(cases)
if args['mode'] == 'deaths':
return int(deaths)
return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
file = None
if macro == 'county':
file = 'county.csv'
elif macro == 'state':
file = 'county.csv'
elif macro == 'country':
file = 'state.csv'
if not file:
abort(500)
return file
def get_args():
return {'mode': request.args.get('mode', None), 'fips': request.args.
get('fipsKey', False)}
def compare_state(state_filter, entry):
if str_normalize(entry) == str_normalize(state_filter):
return True
if us.states.lookup(state_filter) and us.states.lookup(state_filter
).name == entry:
return True
return False
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
if locale == 'state':
key_row = 3 if args['fips'] else 1
else:
key_row = 2 if args['fips'] else 1
return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def root():
return render_template('index.html')
@app.route('/api')
def index():
return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/total/states')
def total_states():
return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
@app.route('/api/timeline/states')
def timeline_states():
return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
def state_view_total(data, state_filter, args):
data = filter_country_state(data, state_filter)
result = process_mode(args, data[-1][3], data[-1][4])
result = str(result) if isinstance(result, int) else result
return result
def state_view(data, state_filter, args):
result = {}
data = filter_country_state(data, state_filter)
for row in data:
result[row[0]] = process_mode(args, row[3], row[4])
return result
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
def country_view(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in data:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4],
row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4
], row[5])
return dataset
def process_mode(args, cases, deaths):
if args['mode'] == 'cases':
return int(cases)
if args['mode'] == 'deaths':
return int(deaths)
return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
file = None
if macro == 'county':
file = 'county.csv'
elif macro == 'state':
file = 'county.csv'
elif macro == 'country':
file = 'state.csv'
if not file:
abort(500)
return file
def get_args():
return {'mode': request.args.get('mode', None), 'fips': request.args.
get('fipsKey', False)}
def compare_state(state_filter, entry):
if str_normalize(entry) == str_normalize(state_filter):
return True
if us.states.lookup(state_filter) and us.states.lookup(state_filter
).name == entry:
return True
return False
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
if locale == 'state':
key_row = 3 if args['fips'] else 1
else:
key_row = 2 if args['fips'] else 1
return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
<|reserved_special_token_1|>
import csv
import us
from flask import abort, Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def root():
return render_template('index.html')
@app.route('/api')
def index():
return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/total/states')
def total_states():
return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
return process_state_counties_total(read_macro('county'), state, None,
get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
return process_state_counties_total(read_macro('county'), state, county,
get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
@app.route('/api/timeline/states')
def timeline_states():
return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
return process_state_county(read_macro('county'), state, county, get_args()
)
def state_view_total(data, state_filter, args):
data = filter_country_state(data, state_filter)
result = process_mode(args, data[-1][3], data[-1][4])
result = str(result) if isinstance(result, int) else result
return result
def state_view(data, state_filter, args):
result = {}
data = filter_country_state(data, state_filter)
for row in data:
result[row[0]] = process_mode(args, row[3], row[4])
return result
def country_view_total(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in reversed(data):
if row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[3], row[4])
return dataset
def country_view(data, args):
dataset = {}
key_row = get_key_row(args, 'country')
for row in data:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])
return dataset
def process_state_counties_total(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
result = process_county_data_total(data, county_filter, args)
if isinstance(result, int):
result = str(result)
return result
return process_state_data_total(data, args)
def process_state_data_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
if row[key_row] and row[key_row] not in dataset:
dataset[row[key_row]] = process_mode(args, row[4], row[5])
return dataset
def process_state_county(data, state_filter, county_filter, args):
data = filter_state(data, state_filter)
if county_filter:
return process_county_data(data, county_filter, args)
return process_state_data(data, args)
def process_county_data_total(data, county_filter, args):
for row in reversed(data):
if compare_county(county_filter, row[1], row[3]):
return process_mode(args, row[4], row[5])
return None
def process_county_data(data, county_filter, args):
dataset = {}
for row in data:
if compare_county(county_filter, row[1], row[3]):
dataset[row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_state_data(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
if row[key_row]:
if row[key_row] not in dataset:
dataset[row[key_row]] = {}
dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])
return dataset
def process_counties_total(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in reversed(data):
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = process_mode(args, row[4],
row[5])
return dataset
def process_country_county(data, args):
dataset = {}
key_row = get_key_row(args, 'state')
for row in data:
state_key = get_state_key(args, row[2])
if state_key not in dataset:
dataset[state_key] = {}
if row[key_row] not in dataset[state_key]:
dataset[state_key][row[key_row]] = {}
dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4
], row[5])
return dataset
def process_mode(args, cases, deaths):
if args['mode'] == 'cases':
return int(cases)
if args['mode'] == 'deaths':
return int(deaths)
return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[2]):
result.append(row)
return result
def filter_country_state(data, state_filter):
result = []
for row in data:
if compare_state(state_filter, row[1]):
result.append(row)
return result
def read_macro(macro):
cv_data = []
with open(get_macro_file(macro), newline='') as data_file:
data_reader = csv.reader(data_file)
for row in data_reader:
cv_data.append(row)
cv_data.pop(0)
return cv_data
def get_macro_file(macro):
file = None
if macro == 'county':
file = 'county.csv'
elif macro == 'state':
file = 'county.csv'
elif macro == 'country':
file = 'state.csv'
if not file:
abort(500)
return file
def get_args():
return {'mode': request.args.get('mode', None), 'fips': request.args.
get('fipsKey', False)}
def compare_state(state_filter, entry):
if str_normalize(entry) == str_normalize(state_filter):
return True
if us.states.lookup(state_filter) and us.states.lookup(state_filter
).name == entry:
return True
return False
def compare_county(county_filter, entry, fips_entry):
if str_normalize(entry) == str_normalize(county_filter):
return True
if county_filter == fips_entry:
return True
return False
def str_normalize(words):
return words.replace(' ', '').lower().capitalize()
def get_key_row(args, locale):
if locale == 'state':
key_row = 3 if args['fips'] else 1
else:
key_row = 2 if args['fips'] else 1
return key_row
def get_state_key(args, state):
if args['fips']:
return us.states.lookup(state).fips
return state
<|reserved_special_token_1|>
import csv
import us
from flask import abort, Flask, request, render_template
app = Flask(__name__) # pylint: disable=invalid-name
@app.route('/')
def root():
    """Serve the static index.html page at the site root."""
    return render_template('index.html')
@app.route('/api')
def index():
    """Serve index.html for the bare /api path as well."""
    return render_template('index.html')
@app.route('/api/total/counties')
def total_counties():
    """Latest totals for every county, grouped by state."""
    return process_counties_total(read_macro('county'), get_args())
@app.route('/api/total/counties/<state>')
def total_counties_state(state):
    """Latest totals for each county in one state."""
    return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/counties/<state>/<county>')
def total_counties_state_county(state, county):
    """Latest totals for a single county in one state."""
    return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/total/states')
def total_states():
    """Latest totals for every state."""
    return country_view_total(read_macro('country'), get_args())
@app.route('/api/total/states/<state>')
def total_states_state(state):
    """Latest total for a single state."""
    return state_view_total(read_macro('country'), state, get_args())
@app.route('/api/total/states/<state>/counties')
def total_states_state_counties(state):
    """Alias of /api/total/counties/<state>: county totals in one state."""
    return process_state_counties_total(read_macro('county'), state, None, get_args())
@app.route('/api/total/states/<state>/counties/<county>')
def total_states_state_counties_county(state, county):
    """Alias of /api/total/counties/<state>/<county>: one county's total."""
    return process_state_counties_total(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/counties')
def timeline_counties():
    """Full per-date timeline for every county, grouped by state."""
    return process_country_county(read_macro('county'), get_args())
@app.route('/api/timeline/counties/<state>')
def timeline_counties_state(state):
    """Per-date timeline for each county in one state."""
    return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/counties/<state>/<county>')
def timeline_counties_state_county(state, county):
    """Per-date timeline for a single county in one state."""
    return process_state_county(read_macro('county'), state, county, get_args())
@app.route('/api/timeline/states')
def timeline_states():
    """Per-date timeline for every state."""
    return country_view(read_macro('country'), get_args())
@app.route('/api/timeline/states/<state>')
def timeline_state(state):
    """Per-date timeline for a single state."""
    return state_view(read_macro('country'), state, get_args())
@app.route('/api/timeline/states/<state>/counties')
def timeline_state_counties(state):
    """Alias of /api/timeline/counties/<state>: county timelines in one state."""
    return process_state_county(read_macro('county'), state, None, get_args())
@app.route('/api/timeline/states/<state>/counties/<county>')
def timeline_state_county(state, county):
    """Alias of /api/timeline/counties/<state>/<county>: one county's timeline."""
    return process_state_county(read_macro('county'), state, county, get_args())
def state_view_total(data, state_filter, args):
    """Most recent cases/deaths payload for one state (last matching row).

    NOTE(review): `rows[-1]` raises IndexError when no row matches the
    filter (unknown state) — confirm whether a 404 is wanted instead.
    """
    rows = filter_country_state(data, state_filter)
    latest = rows[-1]
    payload = process_mode(args, latest[3], latest[4])
    # Flask cannot return a bare int, so stringify numeric results.
    if isinstance(payload, int):
        payload = str(payload)
    return payload
def state_view(data, state_filter, args):
    """Per-date timeline for one state: {date: cases/deaths payload}."""
    rows = filter_country_state(data, state_filter)
    return {row[0]: process_mode(args, row[3], row[4]) for row in rows}
def country_view_total(data, args):
    """Latest payload per state key.

    Iterates newest-first (reversed) so the first row seen per key —
    presumably the most recent, assuming chronological input — wins.
    """
    totals = {}
    key_row = get_key_row(args, 'country')
    for row in reversed(data):
        key = row[key_row]
        if key not in totals:
            totals[key] = process_mode(args, row[3], row[4])
    return totals
def country_view(data, args):
    """Full timeline per state: {state_key: {date: payload}}."""
    timelines = {}
    key_row = get_key_row(args, 'country')
    for row in data:
        per_state = timelines.setdefault(row[key_row], {})
        per_state[row[0]] = process_mode(args, row[3], row[4])
    return timelines
def process_state_counties_total(data, state_filter, county_filter, args):
    """Totals for one state's counties, or a single county when given."""
    rows = filter_state(data, state_filter)
    if not county_filter:
        return process_state_data_total(rows, args)
    result = process_county_data_total(rows, county_filter, args)
    # Flask cannot return a bare int, so stringify numeric results.
    return str(result) if isinstance(result, int) else result
def process_state_data_total(data, args):
    """Latest payload per county key within a state (blank keys skipped).

    Newest-first iteration: the first row seen per key wins.
    """
    totals = {}
    key_row = get_key_row(args, 'state')
    for row in reversed(data):
        key = row[key_row]
        if key and key not in totals:
            totals[key] = process_mode(args, row[4], row[5])
    return totals
def process_state_county(data, state_filter, county_filter, args):
    """Timelines for a state's counties, or one county when county_filter is set."""
    rows = filter_state(data, state_filter)
    if not county_filter:
        return process_state_data(rows, args)
    return process_county_data(rows, county_filter, args)
def process_county_data_total(data, county_filter, args):
    """Most recent payload for one county; None when nothing matches.

    NOTE(review): the None propagates up and makes the Flask view return
    None (a 500) — consider a 404 upstream.
    """
    for row in reversed(data):
        if not compare_county(county_filter, row[1], row[3]):
            continue
        return process_mode(args, row[4], row[5])
    return None
def process_county_data(data, county_filter, args):
    """Timeline for one county: {date: payload}."""
    return {
        row[0]: process_mode(args, row[4], row[5])
        for row in data
        if compare_county(county_filter, row[1], row[3])
    }
def process_state_data(data, args):
    """Timeline per county: {county_key: {date: payload}} (blank keys skipped)."""
    timelines = {}
    key_row = get_key_row(args, 'state')
    for row in data:
        key = row[key_row]
        if not key:
            continue
        timelines.setdefault(key, {})[row[0]] = process_mode(args, row[4], row[5])
    return timelines
def process_counties_total(data, args):
    """Latest payload per county, grouped by state key.

    Newest-first iteration: the first row seen per (state, county) wins.
    """
    totals = {}
    key_row = get_key_row(args, 'state')
    for row in reversed(data):
        per_state = totals.setdefault(get_state_key(args, row[2]), {})
        if row[key_row] not in per_state:
            per_state[row[key_row]] = process_mode(args, row[4], row[5])
    return totals
def process_country_county(data, args):
    """Full timeline per county, grouped by state key."""
    timelines = {}
    key_row = get_key_row(args, 'state')
    for row in data:
        per_state = timelines.setdefault(get_state_key(args, row[2]), {})
        per_county = per_state.setdefault(row[key_row], {})
        per_county[row[0]] = process_mode(args, row[4], row[5])
    return timelines
def process_mode(args, cases, deaths):
    """Project the raw case/death values per the requested mode.

    Returns int(cases) or int(deaths) for mode 'cases'/'deaths';
    otherwise both values unchanged in a dict.
    """
    mode = args['mode']
    if mode in ('cases', 'deaths'):
        return int(cases if mode == 'cases' else deaths)
    return {'cases': cases, 'deaths': deaths}
def filter_state(data, state_filter):
    """Rows whose state column (index 2, county.csv layout) matches the filter."""
    return [row for row in data if compare_state(state_filter, row[2])]
def filter_country_state(data, state_filter):
    """Rows whose state column (index 1, state.csv layout) matches the filter."""
    return [row for row in data if compare_state(state_filter, row[1])]
def read_macro(macro):
    """Load the CSV backing `macro` and return its data rows.

    The first row is the header; pop(0) (rather than slicing) keeps the
    original's IndexError on a completely empty file.
    """
    with open(get_macro_file(macro), newline='') as data_file:
        rows = list(csv.reader(data_file))
    rows.pop(0)
    return rows
def get_macro_file(macro):
    """Resolve a macro name to its backing CSV file; HTTP 500 when unknown.

    NOTE(review): 'state' maps to county.csv (county rows carry a state
    column) — confirm this is intentional.
    """
    files = {'county': 'county.csv', 'state': 'county.csv', 'country': 'state.csv'}
    file = files.get(macro)
    if not file:
        abort(500)
    return file
def get_args():
    """Pull the supported query-string options into a plain dict."""
    mode = request.args.get('mode', None)
    fips = request.args.get('fipsKey', False)
    return {'mode': mode, 'fips': fips}
def compare_state(state_filter, entry):
    """Return True when `entry` names the same state as `state_filter`.

    Matches on normalized spelling first, then falls back to a `us`
    library lookup (which also resolves abbreviations and FIPS codes).
    """
    if str_normalize(entry) == str_normalize(state_filter):
        return True
    # Look the state up once; the original called us.states.lookup twice
    # per row, doubling the cost of every filter pass.
    state = us.states.lookup(state_filter)
    if state and state.name == entry:
        return True
    return False
def compare_county(county_filter, entry, fips_entry):
    """True when the filter matches the county name or its FIPS code."""
    name_matches = str_normalize(entry) == str_normalize(county_filter)
    return name_matches or county_filter == fips_entry
def str_normalize(words):
    """Normalize a place name for comparison: drop spaces, 'Xxxx' casing.

    `str.capitalize` already lowercases everything after the first
    character, so the original's extra `.lower()` call was redundant.
    """
    return words.replace(' ', '').capitalize()
def get_key_row(args, locale):
    """Column index of the grouping key.

    Index 1 is the name column; with fipsKey the id column is used
    instead (3 in county.csv, 2 in state.csv).
    """
    if args['fips']:
        return 3 if locale == 'state' else 2
    return 1
def get_state_key(args, state):
    """Return the state's FIPS code when FIPS output was requested, else the raw name."""
    if not args['fips']:
        return state
    return us.states.lookup(state).fips
|
flexible
|
{
"blob_id": "af00c6f443426b1f61e1816d7d14ebc7e6871a82",
"index": 5562,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n<mask token>\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n<mask token>\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, 
state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\n<mask token>\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in 
data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n 
get('fipsKey', False)}\n\n\n<mask token>\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef 
timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, 
state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return 
result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return country_view(read_macro('country'), 
get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, 
row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n result = []\n for row in data:\n if 
compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-4": "import csv\nimport us\nfrom flask import abort, Flask, request, render_template\napp = Flask(__name__)\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None,\n get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county,\n get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return 
country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args()\n )\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n 
dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4],\n row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4\n ], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, 
state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None), 'fips': request.args.\n get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter\n ).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-5": "import csv\nimport us\n\nfrom flask import abort, Flask, request, render_template\n\napp = Flask(__name__) # pylint: disable=invalid-name\n\n\n@app.route('/')\ndef root():\n return render_template('index.html')\n\n\n@app.route('/api')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/api/total/counties')\ndef total_counties():\n return process_counties_total(read_macro('county'), get_args())\n\n\n@app.route('/api/total/counties/<state>')\ndef total_counties_state(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/total/counties/<state>/<county>')\ndef total_counties_state_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/total/states')\ndef total_states():\n return country_view_total(read_macro('country'), get_args())\n\n\n@app.route('/api/total/states/<state>')\ndef total_states_state(state):\n return state_view_total(read_macro('country'), state, get_args())\n\n\n@app.route('/api/total/states/<state>/counties')\ndef total_states_state_counties(state):\n return process_state_counties_total(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/total/states/<state>/counties/<county>')\ndef total_states_state_counties_county(state, county):\n return process_state_counties_total(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/timeline/counties')\ndef timeline_counties():\n return process_country_county(read_macro('county'), get_args())\n\n\n@app.route('/api/timeline/counties/<state>')\ndef timeline_counties_state(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/counties/<state>/<county>')\ndef timeline_counties_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\n@app.route('/api/timeline/states')\ndef timeline_states():\n return 
country_view(read_macro('country'), get_args())\n\n\n@app.route('/api/timeline/states/<state>')\ndef timeline_state(state):\n return state_view(read_macro('country'), state, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties')\ndef timeline_state_counties(state):\n return process_state_county(read_macro('county'), state, None, get_args())\n\n\n@app.route('/api/timeline/states/<state>/counties/<county>')\ndef timeline_state_county(state, county):\n return process_state_county(read_macro('county'), state, county, get_args())\n\n\ndef state_view_total(data, state_filter, args):\n data = filter_country_state(data, state_filter)\n result = process_mode(args, data[-1][3], data[-1][4])\n result = str(result) if isinstance(result, int) else result\n return result\n\n\ndef state_view(data, state_filter, args):\n result = {}\n data = filter_country_state(data, state_filter)\n for row in data:\n result[row[0]] = process_mode(args, row[3], row[4])\n return result\n\n\ndef country_view_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in reversed(data):\n if row[key_row] not in dataset:\n dataset[row[key_row]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef country_view(data, args):\n dataset = {}\n key_row = get_key_row(args, 'country')\n for row in data:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[3], row[4])\n return dataset\n\n\ndef process_state_counties_total(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n result = process_county_data_total(data, county_filter, args)\n if isinstance(result, int):\n result = str(result)\n return result\n return process_state_data_total(data, args)\n\n\ndef process_state_data_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n if row[key_row] and row[key_row] not in dataset:\n 
dataset[row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_county(data, state_filter, county_filter, args):\n data = filter_state(data, state_filter)\n if county_filter:\n return process_county_data(data, county_filter, args)\n return process_state_data(data, args)\n\n\ndef process_county_data_total(data, county_filter, args):\n for row in reversed(data):\n if compare_county(county_filter, row[1], row[3]):\n return process_mode(args, row[4], row[5])\n return None\n\n\ndef process_county_data(data, county_filter, args):\n dataset = {}\n for row in data:\n if compare_county(county_filter, row[1], row[3]):\n dataset[row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_state_data(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n if row[key_row]:\n if row[key_row] not in dataset:\n dataset[row[key_row]] = {}\n dataset[row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_counties_total(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in reversed(data):\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_country_county(data, args):\n dataset = {}\n key_row = get_key_row(args, 'state')\n for row in data:\n state_key = get_state_key(args, row[2])\n if state_key not in dataset:\n dataset[state_key] = {}\n if row[key_row] not in dataset[state_key]:\n dataset[state_key][row[key_row]] = {}\n dataset[state_key][row[key_row]][row[0]] = process_mode(args, row[4], row[5])\n return dataset\n\n\ndef process_mode(args, cases, deaths):\n if args['mode'] == 'cases':\n return int(cases)\n if args['mode'] == 'deaths':\n return int(deaths)\n return {'cases': cases, 'deaths': deaths}\n\n\ndef filter_state(data, state_filter):\n 
result = []\n for row in data:\n if compare_state(state_filter, row[2]):\n result.append(row)\n return result\n\n\ndef filter_country_state(data, state_filter):\n result = []\n for row in data:\n if compare_state(state_filter, row[1]):\n result.append(row)\n return result\n\n\ndef read_macro(macro):\n cv_data = []\n with open(get_macro_file(macro), newline='') as data_file:\n data_reader = csv.reader(data_file)\n for row in data_reader:\n cv_data.append(row)\n cv_data.pop(0)\n return cv_data\n\n\ndef get_macro_file(macro):\n file = None\n if macro == 'county':\n file = 'county.csv'\n elif macro == 'state':\n file = 'county.csv'\n elif macro == 'country':\n file = 'state.csv'\n if not file:\n abort(500)\n return file\n\n\ndef get_args():\n return {'mode': request.args.get('mode', None),\n 'fips': request.args.get('fipsKey', False)}\n\n\ndef compare_state(state_filter, entry):\n if str_normalize(entry) == str_normalize(state_filter):\n return True\n if us.states.lookup(state_filter) and us.states.lookup(state_filter).name == entry:\n return True\n return False\n\n\ndef compare_county(county_filter, entry, fips_entry):\n if str_normalize(entry) == str_normalize(county_filter):\n return True\n if county_filter == fips_entry:\n return True\n return False\n\n\ndef str_normalize(words):\n return words.replace(' ', '').lower().capitalize()\n\n\ndef get_key_row(args, locale):\n if locale == 'state':\n key_row = 3 if args['fips'] else 1\n else:\n key_row = 2 if args['fips'] else 1\n return key_row\n\n\ndef get_state_key(args, state):\n if args['fips']:\n return us.states.lookup(state).fips\n return state\n",
"step-ids": [
34,
39,
40,
41,
42
]
}
|
[
34,
39,
40,
41,
42
] |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
# prevent numpy exponential
# notation on print, default False
# Print NumPy arrays in plain (non-exponential) notation.
np.set_printoptions(suppress=True)

# Shared accumulator: each vel_det() call appends this file's head_y trace
# (and a per-file orientation column) here.
y_cord_df = pd.DataFrame(columns=['Time', 'Orien'])
# Frame indices for a 30-minute, 60-fps session (108000 frames).
list_no = np.arange(0.0, 108000.0, 1.0)
# frames -> seconds ( * 1/60 ) -> minutes ( / 60 )
y_cord_df['Time'] = (list_no * (1 / 60)) / 60
rolling_avg_duration = 10  # seconds
def vel_det(file, legend_label, line_color):
    """Plot the head y-coordinate against time for one DLC tracking file.

    Reads a DeepLabCut .h5 output file, flattens its 3-level column index
    (scorer, bodypart, coord) to "<bodypart>_<coord>" names, classifies each
    frame as 'rear' (head_y <= 300 px) or 'groom', then plots head_y vs. time
    in minutes for frames with head_y > 400 and saves the figure.

    Side effects: appends two columns to the module-level ``y_cord_df``,
    prints the rear mask and the filtered frame, writes
    ``'<legend_label>.jpg'`` to the working directory, and opens a
    matplotlib window (``plt.show()``).

    Parameters
    ----------
    file : str
        Path to the DLC-produced HDF5 file (requires pytables to read).
    legend_label : str
        Legend/title text; also used as the output image filename stem.
    line_color : str
        Matplotlib color for the plotted line.
    """
    fps = 60  # recording frame rate (frames per second)

    data_df = pd.read_hdf(path_or_buf=file)
    # DLC files use a 3-level column MultiIndex: (scorer, bodypart, coord).
    bodyparts = data_df.columns.get_level_values(1)
    coords = data_df.columns.get_level_values(2)
    scorer = data_df.columns.get_level_values(0)[0]
    # One frame index per tracked sample.
    Time = np.arange(np.size(data_df[scorer][bodyparts[0]]['x'].values))
    # Flatten the MultiIndex to single-level "<bodypart>_<coord>" columns.
    data_df.columns = bodyparts + "_" + coords

    # Time elapsed (seconds) per frame.
    data_df['Time Elapsed'] = Time / fps

    # Record this file's head_y trace in the shared module-level frame
    # (pandas aligns on index, so differing lengths are tolerated).
    y_cord_df[file] = data_df['head_y']
    # np.NaN was removed in NumPy 2.0; np.nan is the supported spelling.
    y_cord_df[file + '_orient'] = np.nan

    # Image y grows downward, so a *small* head_y means the head is high:
    # classify frames as rearing when head_y <= 300 px, grooming otherwise.
    rear_values = data_df['head_y'].values <= 300
    print(rear_values)
    data_df['Orientation'] = rear_values
    data_df['GR'] = 'groom'
    data_df.loc[rear_values, 'GR'] = 'rear'

    # Only plot frames where the head is low in the image (head_y > 400 px).
    filt_df = data_df['head_y'] > 400
    print(data_df[filt_df])

    plt.figure(figsize=(6, 9.5))
    # Frame index / 3600 converts 60-fps frame numbers to minutes.
    plt.plot(data_df[filt_df].head_y, data_df[filt_df].index / 3600,
             color=line_color, linewidth=1, label=legend_label)

    leg = plt.legend()
    plt.rc('font', family='Arial', size=12)
    plt.rc('lines', linewidth=1)
    # Legend.legendHandles was deprecated in matplotlib 3.7 and removed in
    # 3.9; get_lines() returns the same Line2D handles for line plots.
    for handle in leg.get_lines():
        handle.set_linewidth(3)
    plt.xlabel('y coordinate(pixels)', fontsize=12)
    plt.ylabel('time(minutes)', fontsize=12)
    plt.title(legend_label)

    plt.savefig(legend_label + '.jpg', format='jpg')
    plt.show()
if __name__ == '__main__':
    # NOTE(review): the Saline, Naltrexone+U50, NORBNI+U50 and NORBNI+Saline
    # cohorts (and the group-average/SEM overlay plotting) were previously
    # present here but disabled; only the U50-alone cohort below is run.

    """U50 Data"""
    u50_runs = [
        ('U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F0 5mgkg U50', 'steelblue'),
        ('U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F1 5mgkg U50', 'deepskyblue'),
        ('U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'F2 5mgkg U50', 'powderblue'),
        ('U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M1 5mgkg U50', 'blue'),
        ('U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M2 5mgkg U50', 'blue'),
        ('U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M3 5mgkg U50', 'lightblue'),
        ('U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',
         'M4 5mgkg U50', 'turquoise'),
    ]
    # One figure per animal, in the same order as the original call sequence.
    for h5_file, label, color in u50_runs:
        vel_det(file=h5_file, legend_label=label, line_color=color)
|
normal
|
{
"blob_id": "ba5171d3de87ec01770a7174d9783d5058b0fced",
"index": 9896,
"step-1": "<mask token>\n\n\ndef vel_det(file, legend_label, line_color):\n fps = 60\n data_df = pd.read_hdf(path_or_buf=file)\n bodyparts = data_df.columns.get_level_values(1)\n coords = data_df.columns.get_level_values(2)\n bodyparts2plot = bodyparts\n scorer = data_df.columns.get_level_values(0)[0]\n Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))\n column_title = bodyparts + '_' + coords\n data_df.columns = column_title\n data_df['Time Elapsed'] = Time / fps\n animal = []\n animal[:] = ' '.join(file.split()[2:5])\n data_df['Time Elapsed'] = Time / fps\n y_cord_df[file] = data_df['head_y']\n y_cord_df[file + '_orient'] = np.NaN\n i = 0\n rear_values = data_df['head_y'].values <= 300\n print(rear_values)\n data_df['Orientation'] = rear_values\n data_df['GR'] = 'groom'\n data_df.loc[rear_values == True, 'GR'] = 'rear'\n filt_df = data_df['head_y'] > 400\n print(data_df[filt_df])\n plt.figure(figsize=(6, 9.5))\n plt.plot(data_df[filt_df].head_y, data_df[filt_df].index / 3600, color=\n line_color, linewidth=1, label=legend_label)\n leg = plt.legend()\n font = {'family': 'Arial', 'size': 12}\n plt.rc('font', **font)\n plt.rc('lines', linewidth=1)\n for i in leg.legendHandles:\n i.set_linewidth(3)\n plt.xlabel('y coordinate(pixels)', fontsize=12)\n plt.ylabel('time(minutes)', fontsize=12)\n plt.title(legend_label)\n plt.savefig(legend_label + '.jpg', format='jpg')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.set_printoptions(suppress=True)\n<mask token>\n\n\ndef vel_det(file, legend_label, line_color):\n fps = 60\n data_df = pd.read_hdf(path_or_buf=file)\n bodyparts = data_df.columns.get_level_values(1)\n coords = data_df.columns.get_level_values(2)\n bodyparts2plot = bodyparts\n scorer = data_df.columns.get_level_values(0)[0]\n Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))\n column_title = bodyparts + '_' + coords\n data_df.columns = column_title\n data_df['Time Elapsed'] = Time / fps\n animal = []\n animal[:] = ' '.join(file.split()[2:5])\n data_df['Time Elapsed'] = Time / fps\n y_cord_df[file] = data_df['head_y']\n y_cord_df[file + '_orient'] = np.NaN\n i = 0\n rear_values = data_df['head_y'].values <= 300\n print(rear_values)\n data_df['Orientation'] = rear_values\n data_df['GR'] = 'groom'\n data_df.loc[rear_values == True, 'GR'] = 'rear'\n filt_df = data_df['head_y'] > 400\n print(data_df[filt_df])\n plt.figure(figsize=(6, 9.5))\n plt.plot(data_df[filt_df].head_y, data_df[filt_df].index / 3600, color=\n line_color, linewidth=1, label=legend_label)\n leg = plt.legend()\n font = {'family': 'Arial', 'size': 12}\n plt.rc('font', **font)\n plt.rc('lines', linewidth=1)\n for i in leg.legendHandles:\n i.set_linewidth(3)\n plt.xlabel('y coordinate(pixels)', fontsize=12)\n plt.ylabel('time(minutes)', fontsize=12)\n plt.title(legend_label)\n plt.savefig(legend_label + '.jpg', format='jpg')\n plt.show()\n\n\nif __name__ == '__main__':\n \"\"\"Saline Data\"\"\"\n \"\"\"Naltrexone Data\"\"\"\n \"\"\"U50 Data\"\"\"\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F0 5mgkg U50', line_color='steelblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F1 5mgkg U50', line_color='deepskyblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , 
legend_label='F2 5mgkg U50', line_color='powderblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M1 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M2 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M3 5mgkg U50', line_color='lightblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M4 5mgkg U50', line_color='turquoise')\n \"\"\"NORBNI U50 Data\"\"\"\n \"\"\"NORBNI Saline\"\"\"\n",
"step-3": "<mask token>\nnp.set_printoptions(suppress=True)\ny_cord_df = pd.DataFrame(data=None, columns=['Time', 'Orien'])\nlist_no = np.arange(0.0, 108000.0, 1.0)\ny_cord_df['Time'] = list_no * (1 / 60) / 60\nrolling_avg_duration = 10\n\n\ndef vel_det(file, legend_label, line_color):\n fps = 60\n data_df = pd.read_hdf(path_or_buf=file)\n bodyparts = data_df.columns.get_level_values(1)\n coords = data_df.columns.get_level_values(2)\n bodyparts2plot = bodyparts\n scorer = data_df.columns.get_level_values(0)[0]\n Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))\n column_title = bodyparts + '_' + coords\n data_df.columns = column_title\n data_df['Time Elapsed'] = Time / fps\n animal = []\n animal[:] = ' '.join(file.split()[2:5])\n data_df['Time Elapsed'] = Time / fps\n y_cord_df[file] = data_df['head_y']\n y_cord_df[file + '_orient'] = np.NaN\n i = 0\n rear_values = data_df['head_y'].values <= 300\n print(rear_values)\n data_df['Orientation'] = rear_values\n data_df['GR'] = 'groom'\n data_df.loc[rear_values == True, 'GR'] = 'rear'\n filt_df = data_df['head_y'] > 400\n print(data_df[filt_df])\n plt.figure(figsize=(6, 9.5))\n plt.plot(data_df[filt_df].head_y, data_df[filt_df].index / 3600, color=\n line_color, linewidth=1, label=legend_label)\n leg = plt.legend()\n font = {'family': 'Arial', 'size': 12}\n plt.rc('font', **font)\n plt.rc('lines', linewidth=1)\n for i in leg.legendHandles:\n i.set_linewidth(3)\n plt.xlabel('y coordinate(pixels)', fontsize=12)\n plt.ylabel('time(minutes)', fontsize=12)\n plt.title(legend_label)\n plt.savefig(legend_label + '.jpg', format='jpg')\n plt.show()\n\n\nif __name__ == '__main__':\n \"\"\"Saline Data\"\"\"\n \"\"\"Naltrexone Data\"\"\"\n \"\"\"U50 Data\"\"\"\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F0 5mgkg U50', line_color='steelblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , 
legend_label='F1 5mgkg U50', line_color='deepskyblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F2 5mgkg U50', line_color='powderblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M1 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M2 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M3 5mgkg U50', line_color='lightblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M4 5mgkg U50', line_color='turquoise')\n \"\"\"NORBNI U50 Data\"\"\"\n \"\"\"NORBNI Saline\"\"\"\n",
"step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nnp.set_printoptions(suppress=True)\ny_cord_df = pd.DataFrame(data=None, columns=['Time', 'Orien'])\nlist_no = np.arange(0.0, 108000.0, 1.0)\ny_cord_df['Time'] = list_no * (1 / 60) / 60\nrolling_avg_duration = 10\n\n\ndef vel_det(file, legend_label, line_color):\n fps = 60\n data_df = pd.read_hdf(path_or_buf=file)\n bodyparts = data_df.columns.get_level_values(1)\n coords = data_df.columns.get_level_values(2)\n bodyparts2plot = bodyparts\n scorer = data_df.columns.get_level_values(0)[0]\n Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))\n column_title = bodyparts + '_' + coords\n data_df.columns = column_title\n data_df['Time Elapsed'] = Time / fps\n animal = []\n animal[:] = ' '.join(file.split()[2:5])\n data_df['Time Elapsed'] = Time / fps\n y_cord_df[file] = data_df['head_y']\n y_cord_df[file + '_orient'] = np.NaN\n i = 0\n rear_values = data_df['head_y'].values <= 300\n print(rear_values)\n data_df['Orientation'] = rear_values\n data_df['GR'] = 'groom'\n data_df.loc[rear_values == True, 'GR'] = 'rear'\n filt_df = data_df['head_y'] > 400\n print(data_df[filt_df])\n plt.figure(figsize=(6, 9.5))\n plt.plot(data_df[filt_df].head_y, data_df[filt_df].index / 3600, color=\n line_color, linewidth=1, label=legend_label)\n leg = plt.legend()\n font = {'family': 'Arial', 'size': 12}\n plt.rc('font', **font)\n plt.rc('lines', linewidth=1)\n for i in leg.legendHandles:\n i.set_linewidth(3)\n plt.xlabel('y coordinate(pixels)', fontsize=12)\n plt.ylabel('time(minutes)', fontsize=12)\n plt.title(legend_label)\n plt.savefig(legend_label + '.jpg', format='jpg')\n plt.show()\n\n\nif __name__ == '__main__':\n \"\"\"Saline Data\"\"\"\n \"\"\"Naltrexone Data\"\"\"\n \"\"\"U50 Data\"\"\"\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F0 5mgkg U50', line_color='steelblue')\n 
vel_det(file=\n 'U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F1 5mgkg U50', line_color='deepskyblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='F2 5mgkg U50', line_color='powderblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M1 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M2 5mgkg U50', line_color='blue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M3 5mgkg U50', line_color='lightblue')\n vel_det(file=\n 'U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5'\n , legend_label='M4 5mgkg U50', line_color='turquoise')\n \"\"\"NORBNI U50 Data\"\"\"\n \"\"\"NORBNI Saline\"\"\"\n",
"step-5": "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\n# prevent numpy exponential\n# notation on print, default False\nnp.set_printoptions(suppress=True)\n\ny_cord_df = pd.DataFrame(data=None, columns=['Time', 'Orien'])\nlist_no = np.arange(0.0, 108000.0, 1.0)\ny_cord_df['Time'] = (list_no*(1/60))/60\nrolling_avg_duration= 10 #in seconds\n\ndef vel_det(file, legend_label, line_color):\n fps=60\n\n data_df = pd.read_hdf(path_or_buf=file)\n bodyparts = data_df.columns.get_level_values(1)\n coords = data_df.columns.get_level_values(2)\n bodyparts2plot = bodyparts\n scorer = data_df.columns.get_level_values(0)[0]\n Time = np.arange(np.size(data_df[scorer][bodyparts2plot[0]]['x'].values))\n column_title = bodyparts + \"_\" + coords\n data_df.columns = column_title\n\n # calculate the time elapsed per frame and append column\n data_df['Time Elapsed'] = Time / fps\n\n # print(data_df)\n\n # what's being plotted\n # plt.plot(data_df['Time Elapsed'], data_df['velocity_roll'], color=line_color, marker='o', markersize=0.4, linewidth=0.3, label=legend_label) # scatter plot with faint lines\n # plt.plot(data_df['Time Elapsed']/60, data_df['velocity_roll'], color=line_color, linewidth=1, label=legend_label)\n # plot formatting\n # plt.xlabel('time (seconds)')\n # plt.ylabel('velocity (pixels/second)')\n # plt.legend(loc=2)\n # plt.title('total distance traveled vs. time: ' + path)\n animal = []\n animal[:] = ' '.join(file.split()[2:5])\n # plt.title('Total Distance vs. 
Time for: ' + ' '.join(file.split()[:2]) + \" \"+ ''.join(animal[:2]))\n # plt.title(str(rolling_avg_duration)+' second Rolling Velocity Pretreat 3mkgNaltrexone+5mgkg U50')\n\n data_df['Time Elapsed'] = Time / fps\n y_cord_df[file] = data_df['head_y']\n y_cord_df[file+'_orient'] = np.NaN\n\n i = 0\n\n # rear_values = data_df['head_y'].values<=300\n rear_values = data_df['head_y'].values <= 300\n print(rear_values)\n data_df['Orientation']=rear_values\n data_df['GR'] = 'groom'\n data_df.loc[rear_values == True, 'GR'] = 'rear'\n\n # for time in Time:\n # if data_df['head_y'].iloc[time] >= 234:\n # data_df[file + '_orient'] = 'rear'\n # i=1+i\n # # using 1 for rear\n # else:\n # # 0 for groom/walk\n # data_df[file + '_orient'] = 'groom'\n # i=1+i\n # print(data_df)\n # for values in data_df['head_y']:\n # if values >= 234:\n # y_cord_df.insert(loc=data_df.loc[], column=file + '_orient', value=1, allow_duplicates=True)\n # else:\n # # 0 for groom/walk\n # y_cord_df.insert(loc=i, column=file+'_orient', value=0, allow_duplicates=True)\n # i = i+1\n # print('iter'+str(i))\n # print(data_df['Orientation'])\n filt_df = data_df['head_y'] > 400\n print(data_df[filt_df])\n plt.figure(figsize=(6, 9.5))\n # plt.plot(data_df['Time Elapsed']/60, data_df[\"GR\"], color=line_color, linewidth=1, label=legend_label)\n # plt.plot(data_df['Time Elapsed']/60, data_df['head_y']*-1, color=line_color, linewidth=1, label=legend_label)\n plt.plot(data_df[filt_df].head_y,data_df[filt_df].index/3600, color=line_color, linewidth=1, label=legend_label)\n\n # plt.axhline(y=-300)\n\n\n leg = plt.legend()\n font = {'family': 'Arial',\n 'size': 12}\n plt.rc('font', **font)\n plt.rc('lines', linewidth = 1)\n for i in leg.legendHandles:\n i.set_linewidth(3)\n plt.xlabel('y coordinate(pixels)', fontsize=12)\n plt.ylabel('time(minutes)', fontsize=12)\n plt.title(legend_label)\n\n\n plt.savefig(legend_label+'.jpg', format='jpg')\n plt.show()\nif __name__ == '__main__':\n\n \"\"\"Saline Data\"\"\"\n # 
vel_det(file='Saline_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline F0', line_color='yellowgreen')\n # vel_det(file='Saline_Ai14_OPRK1_C2_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline F1', line_color='lightgreen')\n # vel_det(file='Saline_Ai14_OPRK1_C1_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline F2', line_color='lightgreen')\n #\n # vel_det(file='Saline_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline M1', line_color='green')\n # vel_det(file='Saline_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline M2', line_color='lightgreen')\n # vel_det(file='Saline_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline M3', line_color='lightgreen')\n # vel_det(file='Saline_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='Saline M4', line_color='lime')\n\n\n # only_saline = y_cord_df.loc[:, ['Saline_Ai14_OPRK1_C1_M4_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C1_M3_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C2_F1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C1_M1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C1_M2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C1_F0_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Saline_Ai14_OPRK1_C1_F2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5']]\n # y_cord_df['Avg Vel Saline'] = only_saline.mean(axis=1)\n # avg_df['Avg Vel Saline SEM'] = stats.sem(only_saline, axis=1)\n # plt.plot(avg_df['Time'], avg_df['Avg Vel Saline'], color='black', linewidth=1, 
label='Average Velocity Saline+Saline')\n #\n \"\"\"Naltrexone Data\"\"\"\n # vel_det(file='Naltr_U50_Ai14_OPRK1_C2_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F0 Pretreat 3mkg Naltrexone+5mgkg U50', line_color='#ee4466')\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000filtered.h5',\n # legend_label='F1 Pretreat 3mgkg Naltrexone+5mgkg U50', line_color='orangered')\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F2 Pretreat 3mgkg Naltrexone+5mgkg U50', line_color='darkred')\n #\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M1 Pretreat 3mgkg Naltrexone+5mgkg U50', line_color='red')\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M2 Pretreat 3mgkg Naltrexone+5mgkg U50', line_color='red')\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M3 Pretreat 3mgkg Naltrexone+5mgkg U50', line_color='firebrick')\n # vel_det(file='Nalt_U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M4 Pretreat 3mgkg Naltrexone+5mkg U50', line_color='darksalmon')\n\n # only_naltr = avg_df.loc[:,\n # ['Nalt_U50_Ai14_OPRK1_C1_F2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Nalt_U50_Ai14_OPRK1_C1_M2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Nalt_U50_Ai14_OPRK1_C1_M3_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Nalt_U50_Ai14_OPRK1_C1_M1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Nalt_U50_Ai14_OPRK1_C1_M4_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'Naltr_U50_Ai14_OPRK1_C2_F0_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 
'Nalt_U50_Ai14_OPRK1_C1_F1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5']]\n # avg_df['Avg Vel Naltr'] = only_naltr.mean(axis=1)\n # avg_df['Avg Vel Naltr SEM'] = stats.sem(only_naltr, axis=1)\n # plt.plot(avg_df['Time'], avg_df['Avg Vel Naltr'], color='red', linewidth=1, label='Average Velocity 3mgkg Naltr+5mgkg U50')\n #\n #\n \"\"\"U50 Data\"\"\"\n\n vel_det(file='U50_Ai14_OPRK1_C1_F0_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='F0 5mgkg U50', line_color='steelblue')\n vel_det(file='U50_Ai14_OPRK1_C1_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='F1 5mgkg U50', line_color='deepskyblue')\n vel_det(file='U50_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='F2 5mgkg U50', line_color='powderblue')\n\n vel_det(file='U50_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='M1 5mgkg U50', line_color='blue')\n vel_det(file='U50_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='M2 5mgkg U50', line_color='blue')\n vel_det(file='U50_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='M3 5mgkg U50', line_color='lightblue')\n vel_det(file='U50_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n legend_label='M4 5mgkg U50', line_color='turquoise')\n\n # only_U50 = avg_df.loc[:,\n # ['U50_Ai14_OPRK1_C1_F1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C1_F0_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C1_M1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C1_M2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C2_F2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C1_M3_Top 
DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5',\n # 'U50_Ai14_OPRK1_C1_M4_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered - Copy.h5']]\n # avg_df['Avg Vel U50'] = only_U50.mean(axis=1)\n # avg_df['Avg Vel U50 SEM'] = stats.sem(only_U50, axis=1)\n # plt.plot(avg_df['Time'], avg_df['Avg Vel U50'], color='orange', linewidth=1, label='Average Velocity Saline+5mgkg U50')\n #\n #\n \"\"\"NORBNI U50 Data\"\"\"\n #\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C2_F0_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F0 10mgkg NORBNI+5mgkg U50', line_color='orange')\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C2_F1_sDLC_resnet50_SideViewNov1shuffle1_180000filtered.h5',\n # legend_label='F1 10mgkg NORBNI+5mgkg U50', line_color='darkorange')\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C2_F2_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F2 10mgkg NORBNI+5mgkg U50', line_color='coral')\n #\n #\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C1_M1_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M1 10mgkg NORBNI+5mgkg U50', line_color='orange')\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C1_M2_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M2 10mgkg NORBNI+5mgkg U50', line_color='orange')\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C1_M3_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M3 10mgkg NORBNI+5mgkg U50', line_color='orange') #tiger color\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C1_M4_SDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M4 10mgkg NORBNI+5mkg U50', line_color='#ed8203') #apricot color\n\n # only_NORBNI = avg_df.loc[:,\n # [\n # 'NORBNI_U50_Ai14_OPRK1_C2_F1_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered.h5',\n # 'NORBNI_U50_Ai14_OPRK1_C2_F2_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered.h5',\n # 'NORBNI_U50_Ai14_OPRK1_C1_M3_Top DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered.h5',\n # 'NORBNI_U50_Ai14_OPRK1_C1_M4_Top 
DownDLC_resnet50_BigBinTopSep17shuffle1_250000filtered.h5'\n # ]]\n # avg_df['Avg Vel NORBNI'] = only_NORBNI.mean(axis=1)\n # avg_df['Avg Vel NORBNI SEM'] = stats.sem(only_NORBNI, axis=1)\n # plt.plot(avg_df['Time'], avg_df['Avg Vel NORBNI'], color='blue', linewidth=1,\n # label='Average Velocity 10mgkg NORBNI +5mgkg U50')\n #\n \"\"\"NORBNI Saline\"\"\"\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C2_F1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F1 10mgkg NORBNI+Saline', line_color='purple')\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C2_F2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F2 10mgkg NORBNI+Saline', line_color='purple')\n # vel_det(file='NORBNI_U50_Ai14_OPRK1_C2_F0_sDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='F0 10mgkg NORBNI+Saline', line_color='violet')\n #\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C1_M1_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M1 10mgkg NORBNI+Saline', line_color='blueviolet')\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C1_M2_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M2 10mgkg NORBNI+Saline', line_color='blueviolet')\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C1_M4_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M4 10mkg NORBNI+Saline', line_color='mediumorchid')\n # vel_det(file='NORBNI_Saline_Ai14_OPRK1_C1_M3_side viewDLC_resnet50_SideViewNov1shuffle1_180000.h5',\n # legend_label='M3 10mgkg NORBNI+Saline', line_color='purple')\n #\n # plt.fill_between(avg_df['Time'], avg_df[\"Avg Vel Saline\"]-avg_df[\"Avg Vel Saline SEM\"],\n # avg_df[\"Avg Vel Saline\"]+avg_df[\"Avg Vel Saline SEM\"], alpha=0.25, facecolor='black', edgecolor='black')\n # plt.fill_between(avg_df['Time'], avg_df[\"Avg Vel Naltr\"]-avg_df[\"Avg Vel Naltr SEM\"],\n # avg_df[\"Avg Vel Naltr\"]+avg_df[\"Avg Vel Naltr SEM\"], alpha=0.25, facecolor='red', edgecolor='red')\n # plt.fill_between(avg_df['Time'], 
avg_df[\"Avg Vel U50\"]-avg_df[\"Avg Vel U50 SEM\"],\n # avg_df[\"Avg Vel U50\"]+avg_df[\"Avg Vel U50 SEM\"], alpha=0.25, facecolor='orange', edgecolor='orange')\n # plt.fill_between(avg_df['Time'], avg_df[\"Avg Vel NORBNI\"]-avg_df[\"Avg Vel NORBNI SEM\"],\n # avg_df[\"Avg Vel NORBNI\"]+avg_df[\"Avg Vel NORBNI SEM\"], alpha=0.25, facecolor='blue', edgecolor='blue')\n # plt.plot()\n # leg = plt.legend()\n # font = {'family': 'Arial',\n # 'size': 12}\n # plt.rc('font', **font)\n # plt.rc('lines', linewidth = 1)\n # for i in leg.legendHandles:\n # i.set_linewidth(3)\n # plt.xlabel('time (minutes)', fontsize=12)\n # plt.ylabel('pixel', fontsize=12)\n # plt.title('F2 NORBNI, NORBNI+U50, Saline Head Inverted Y-coordinate')\n # plt.show()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('admin/', admin.site.urls), path('post/', post_views.
post_list, name='post_list'), path('post/create/', post_views.
post_create, name='post_create'), path('post/detail/<int:post_pk>/',
post_views.post_detail, name='post_detail'), path(
'post/<int:post_pk>/comment/create/', post_views.comment_create, name=
'comment_create'), path('member/signup/', member_views.signup, name=
'signup'), path('member/login/', member_views.login, name='login')]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from member import views as member_views
from post import views as post_views
urlpatterns = [path('admin/', admin.site.urls), path('post/', post_views.
post_list, name='post_list'), path('post/create/', post_views.
post_create, name='post_create'), path('post/detail/<int:post_pk>/',
post_views.post_detail, name='post_detail'), path(
'post/<int:post_pk>/comment/create/', post_views.comment_create, name=
'comment_create'), path('member/signup/', member_views.signup, name=
'signup'), path('member/login/', member_views.login, name='login')]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
"""insta URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from member import views as member_views
from post import views as post_views
urlpatterns = [
# admin
path('admin/', admin.site.urls),
# post application
path('post/', post_views.post_list, name='post_list'),
path('post/create/', post_views.post_create, name='post_create'),
path('post/detail/<int:post_pk>/', post_views.post_detail, name='post_detail'),
path('post/<int:post_pk>/comment/create/', post_views.comment_create, name='comment_create'),
# member application
path('member/signup/', member_views.signup, name='signup'),
path('member/login/', member_views.login, name='login'),
]
# URL resolver는 settings.MEDIA_URL로 온 URL은
# view를 찾는 게 아니라 document_root에서 파일을 찾아 리턴해준다.
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT,
)
|
flexible
|
{
"blob_id": "63c0786d277c5576822d6e521f65850762ab5eb0",
"index": 9198,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-3": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('post/', post_views.\n post_list, name='post_list'), path('post/create/', post_views.\n post_create, name='post_create'), path('post/detail/<int:post_pk>/',\n post_views.post_detail, name='post_detail'), path(\n 'post/<int:post_pk>/comment/create/', post_views.comment_create, name=\n 'comment_create'), path('member/signup/', member_views.signup, name=\n 'signup'), path('member/login/', member_views.login, name='login')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-4": "<mask token>\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path\nfrom member import views as member_views\nfrom post import views as post_views\nurlpatterns = [path('admin/', admin.site.urls), path('post/', post_views.\n post_list, name='post_list'), path('post/create/', post_views.\n post_create, name='post_create'), path('post/detail/<int:post_pk>/',\n post_views.post_detail, name='post_detail'), path(\n 'post/<int:post_pk>/comment/create/', post_views.comment_create, name=\n 'comment_create'), path('member/signup/', member_views.signup, name=\n 'signup'), path('member/login/', member_views.login, name='login')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-5": "\"\"\"insta URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom member import views as member_views\nfrom post import views as post_views\n\n\nurlpatterns = [\n # admin\n path('admin/', admin.site.urls),\n # post application\n path('post/', post_views.post_list, name='post_list'),\n path('post/create/', post_views.post_create, name='post_create'),\n path('post/detail/<int:post_pk>/', post_views.post_detail, name='post_detail'),\n path('post/<int:post_pk>/comment/create/', post_views.comment_create, name='comment_create'),\n # member application\n path('member/signup/', member_views.signup, name='signup'),\n path('member/login/', member_views.login, name='login'),\n]\n\n# URL resolver는 settings.MEDIA_URL로 온 URL은\n# view를 찾는 게 아니라 document_root에서 파일을 찾아 리턴해준다.\nurlpatterns += static(\n settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT,\n)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
from requests import Response
from auditlogging.Trail import Trail
from utils.Utils import is_empty
from auditlogging.agents.AuditAgent import AuditAgent
class APIAuditAgent(AuditAgent):
"""
Captures the audit trail using a REST endpoint URL (POST)
Add this agent to Auditor in order to capture audit log to an endpoint.
Note
-----------
1. If user wants to POST custom JSON request body then,
pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)
2. After each call to capture() or capture_custom() latest response is preserved
until next endpoint request.
To get the response, after each invocation please call endpoint_response() to get response
"""
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
Capture Trail to endpoint. Internally it transforms JSON
object while calling POST endpoint
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
custom JSON required for
"""
self._mark_json_trail(jsontrail)
def endpoint_response(self) ->Response:
"""
access the response of the endpoint URL
Returns
--------
Response
Http response
"""
return self._resp
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
if _resp.status_code is not 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
|
normal
|
{
"blob_id": "45a57fac564f23253f9d9cd5d0fd820e559c15b9",
"index": 1212,
"step-1": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n <mask token>\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n <mask token>\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-2": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n <mask token>\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-3": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n \"\"\"\n Captures the audit trail using a REST endpoint URL (POST)\n Add this agent to Auditor in order to capture audit log to an endpoint.\n\n Note\n -----------\n 1. If user wants to POST custom JSON request body then,\n pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)\n 2. After each call to capture() or capture_custom() latest response is preserved\n until next endpoint request.\n To get the response, after each invocation please call endpoint_response() to get response\n\n \"\"\"\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. 
Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-4": "import requests\nfrom requests import Response\nfrom auditlogging.Trail import Trail\nfrom utils.Utils import is_empty\nfrom auditlogging.agents.AuditAgent import AuditAgent\n\n\nclass APIAuditAgent(AuditAgent):\n \"\"\"\n Captures the audit trail using a REST endpoint URL (POST)\n Add this agent to Auditor in order to capture audit log to an endpoint.\n\n Note\n -----------\n 1. If user wants to POST custom JSON request body then,\n pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)\n 2. After each call to capture() or capture_custom() latest response is preserved\n until next endpoint request.\n To get the response, after each invocation please call endpoint_response() to get response\n\n \"\"\"\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. 
Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-5": null,
"step-ids": [
8,
9,
10,
11
]
}
|
[
8,
9,
10,
11
] |
<|reserved_special_token_0|>
def explore_cartpole():
for i_episode in range(2):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print('Action: ', action, 'Rewards', reward)
if done:
print('Episode finished after {} timesteps'.format(t + 1))
break
<|reserved_special_token_0|>
def initial_games():
train_data = []
train_scores = []
for _ in range(num_games):
game_data = []
prev_obs = []
score = 0
for _ in range(num_episodes):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_data.append([prev_obs, action])
prev_obs = observation
score += reward
if done:
break
if score >= min_score:
train_scores.append(score)
for i in game_data:
if i[1] == 0:
output = [1, 0]
else:
output = [0, 1]
train_data.append([i[0], output])
env.reset()
return train_data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
env.reset()
def explore_cartpole():
for i_episode in range(2):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print('Action: ', action, 'Rewards', reward)
if done:
print('Episode finished after {} timesteps'.format(t + 1))
break
<|reserved_special_token_0|>
def initial_games():
train_data = []
train_scores = []
for _ in range(num_games):
game_data = []
prev_obs = []
score = 0
for _ in range(num_episodes):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_data.append([prev_obs, action])
prev_obs = observation
score += reward
if done:
break
if score >= min_score:
train_scores.append(score)
for i in game_data:
if i[1] == 0:
output = [1, 0]
else:
output = [0, 1]
train_data.append([i[0], output])
env.reset()
return train_data
def nn_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
def train_model(train_data, model=False):
x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][
0]), 1)
y = [i[1] for i in train_data]
if not model:
model = nn_model(input_size=len(x[0]))
model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,
show_metric=True, run_id='openai_learning')
return model
<|reserved_special_token_0|>
for _ in range(num_final_games):
total_score = 0
prev_obs = []
env.reset()
for _ in range(target_episodes):
if len(prev_obs) == 0:
action = random.randrange(0, 2)
else:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(
prev_obs), 1))[0])
all_actions.append(action)
observation, reward, done, info = env.step(action)
prev_obs = observation
total_score += reward
if done:
break
all_rewards.append(total_score)
print('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
env = gym.make('CartPole-v0')
env.reset()
def explore_cartpole():
for i_episode in range(2):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print('Action: ', action, 'Rewards', reward)
if done:
print('Episode finished after {} timesteps'.format(t + 1))
break
num_games = 20000
num_episodes = 201
min_score = 75
def initial_games():
train_data = []
train_scores = []
for _ in range(num_games):
game_data = []
prev_obs = []
score = 0
for _ in range(num_episodes):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_data.append([prev_obs, action])
prev_obs = observation
score += reward
if done:
break
if score >= min_score:
train_scores.append(score)
for i in game_data:
if i[1] == 0:
output = [1, 0]
else:
output = [0, 1]
train_data.append([i[0], output])
env.reset()
return train_data
def nn_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
def train_model(train_data, model=False):
x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][
0]), 1)
y = [i[1] for i in train_data]
if not model:
model = nn_model(input_size=len(x[0]))
model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,
show_metric=True, run_id='openai_learning')
return model
train_data = initial_games()
model = train_model(train_data)
num_final_games = 10
target_episodes = 201
all_rewards = []
all_actions = []
for _ in range(num_final_games):
total_score = 0
prev_obs = []
env.reset()
for _ in range(target_episodes):
if len(prev_obs) == 0:
action = random.randrange(0, 2)
else:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(
prev_obs), 1))[0])
all_actions.append(action)
observation, reward, done, info = env.step(action)
prev_obs = observation
total_score += reward
if done:
break
all_rewards.append(total_score)
print('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
<|reserved_special_token_1|>
import gym
import random
import numpy as np
import statistics
from collections import Counter
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
env = gym.make('CartPole-v0')
env.reset()
def explore_cartpole():
for i_episode in range(2):
observation = env.reset()
for t in range(100):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
print('Action: ', action, 'Rewards', reward)
if done:
print('Episode finished after {} timesteps'.format(t + 1))
break
num_games = 20000
num_episodes = 201
min_score = 75
def initial_games():
train_data = []
train_scores = []
for _ in range(num_games):
game_data = []
prev_obs = []
score = 0
for _ in range(num_episodes):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_obs) > 0:
game_data.append([prev_obs, action])
prev_obs = observation
score += reward
if done:
break
if score >= min_score:
train_scores.append(score)
for i in game_data:
if i[1] == 0:
output = [1, 0]
else:
output = [0, 1]
train_data.append([i[0], output])
env.reset()
return train_data
def nn_model(input_size):
    """Build the fully connected policy network.

    Architecture: five ReLU hidden layers (128-256-512-256-128), each
    followed by dropout with keep probability 0.8, then a 2-way softmax
    head trained with Adam and categorical cross-entropy.
    """
    network = input_data(shape=[None, input_size, 1], name='input')
    # The five identical layer+dropout stanzas are data-driven instead of
    # copy/pasted, so the architecture is editable in one place.
    for width in (128, 256, 512, 256, 128):
        network = fully_connected(network, width, activation='relu')
        network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
        loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')
    return model
def train_model(train_data, model=False):
    """Fit the network on (observation, one-hot action) pairs.

    Builds a fresh model when none is supplied; the input size is taken
    from the first training observation.
    """
    obs_len = len(train_data[0][0])
    x = np.array([sample[0] for sample in train_data]).reshape(-1, obs_len, 1)
    y = [sample[1] for sample in train_data]
    if not model:
        model = nn_model(input_size=obs_len)
    model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,
              show_metric=True, run_id='openai_learning')
    return model
# Collect random-play data, train, then let the network drive the pole
# for num_final_games evaluation games.
train_data = initial_games()
model = train_model(train_data)
num_final_games = 10
target_episodes = 201
all_rewards = []
all_actions = []
for _ in range(num_final_games):
    total_score = 0
    prev_obs = []
    env.reset()
    for _ in range(target_episodes):
        # The first frame has no prior observation, so act randomly once;
        # after that, follow the trained policy.
        if len(prev_obs) == 0:
            action = random.randrange(0, 2)
        else:
            prediction = model.predict(prev_obs.reshape(-1, len(prev_obs), 1))
            action = np.argmax(prediction[0])
        all_actions.append(action)
        observation, reward, done, info = env.step(action)
        prev_obs = observation
        total_score += reward
        if done:
            break
    all_rewards.append(total_score)
print('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
<|reserved_special_token_1|>
import gym
import random
import numpy as np
import statistics
from collections import Counter
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
#setup the Cartpole environment
env = gym.make("CartPole-v0")
env.reset()
#----------Explore CartPole-------------#
#exploring the observations, rewards, actions
def explore_cartpole():
    """Sample random actions for two short episodes and print what the
    environment returns (observation, action, reward)."""
    for _ in range(2):
        obs = env.reset()
        t = 0
        while t < 100:
            env.render()
            print(obs)
            act = env.action_space.sample()
            obs, reward, done, info = env.step(act)
            print("Action: ", act, "Rewards", reward)
            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break
            t += 1
#explore_cartpole()
#----------Collect Training Data-------------#
#collect data from successful games by running x games
#successful would be say, lasting more than 100 frames
# Random-play data collection: only games that last long enough
# (score >= min_score) become training examples.
num_games = 20000
num_episodes = 201  # CartPole-v0 terminates at 200 steps
min_score = 75


def initial_games():
    """Collect (observation, one-hot action) pairs from random games
    whose total reward reaches min_score."""
    train_data = []
    train_scores = []
    for _ in range(num_games):
        history = []
        previous = []
        game_score = 0
        for _ in range(num_episodes):
            move = random.randrange(0, 2)
            obs, reward, done, info = env.step(move)
            # Record the action alongside the observation it responded to.
            if len(previous) > 0:
                history.append([previous, move])
            previous = obs
            game_score += reward
            if done:
                break
        if game_score >= min_score:
            train_scores.append(game_score)
            for prev, move in history:
                # One-hot encode the binary action for the softmax head.
                train_data.append([prev, [1, 0] if move == 0 else [0, 1]])
        env.reset()
    return train_data
#----------Build the FC NN model-------------#
#building a simple multi-layer fully connected model
#this model can be generally used to play games like cartpole
#would try training the model on other games in OpenAI environment
def nn_model(input_size):
    """Fully connected softmax classifier over flattened observations."""

    def _hidden(prev, units):
        # One ReLU layer followed by dropout (keep probability 0.8).
        return dropout(fully_connected(prev, units, activation='relu'), 0.8)

    net = input_data(shape=[None, input_size, 1], name='input')
    net = _hidden(net, 128)
    net = _hidden(net, 256)
    net = _hidden(net, 512)
    net = _hidden(net, 256)
    net = _hidden(net, 128)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=1e-3,
                     loss='categorical_crossentropy', name='targets')
    return tflearn.DNN(net, tensorboard_dir='log')
#----------Train the model-------------#
def train_model(train_data, model=False):
    """Train (or continue training) the policy network on game data."""
    observations = [row[0] for row in train_data]
    labels = [row[1] for row in train_data]
    x = np.array(observations).reshape(-1, len(train_data[0][0]), 1)
    if not model:
        model = nn_model(input_size=len(x[0]))
    model.fit({'input': x}, {'targets': labels}, n_epoch=5, snapshot_step=500,
              show_metric=True, run_id='openai_learning')
    return model
# ----- Train, then play evaluation games with the learned policy -----
train_data = initial_games()
model = train_model(train_data)
num_final_games = 10
target_episodes = 201
all_rewards = []
all_actions = []
for _ in range(num_final_games):
    total_score = 0
    prev_obs = []
    env.reset()
    for _ in range(target_episodes):
        # Act randomly on the very first frame; afterwards pick the
        # network's highest-probability action.
        action = (random.randrange(0, 2) if len(prev_obs) == 0 else
                  np.argmax(model.predict(
                      prev_obs.reshape(-1, len(prev_obs), 1))[0]))
        all_actions.append(action)
        observation, reward, done, info = env.step(action)
        prev_obs = observation
        total_score += reward
        if done:
            break
    all_rewards.append(total_score)
print('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))
print('Max reward:', max(all_rewards))
|
flexible
|
{
"blob_id": "7789e54acc02fe0277ff80ce14efbcdc4ee6e7f1",
"index": 8009,
"step-1": "<mask token>\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\n<mask token>\n",
"step-2": "<mask token>\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\n<mask token>\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if 
not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\n<mask token>\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-3": "<mask token>\nenv = gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in 
train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-4": "import gym\nimport random\nimport numpy as np\nimport statistics\nfrom collections import Counter\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\nenv = gym.make('CartPole-v0')\nenv.reset()\n\n\ndef explore_cartpole():\n for i_episode in range(2):\n observation = env.reset()\n for t in range(100):\n env.render()\n print(observation)\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n print('Action: ', action, 'Rewards', reward)\n if done:\n print('Episode finished after {} timesteps'.format(t + 1))\n break\n\n\nnum_games = 20000\nnum_episodes = 201\nmin_score = 75\n\n\ndef initial_games():\n train_data = []\n train_scores = []\n for _ in range(num_games):\n game_data = []\n prev_obs = []\n score = 0\n for _ in range(num_episodes):\n action = random.randrange(0, 2)\n observation, reward, done, info = env.step(action)\n if len(prev_obs) > 0:\n game_data.append([prev_obs, action])\n prev_obs = observation\n score += reward\n if done:\n break\n if score >= min_score:\n train_scores.append(score)\n for i in game_data:\n if i[1] == 0:\n output = [1, 0]\n else:\n output = [0, 1]\n train_data.append([i[0], output])\n env.reset()\n return train_data\n\n\ndef nn_model(input_size):\n network = input_data(shape=[None, input_size, 1], name='input')\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', 
learning_rate=0.001,\n loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n return model\n\n\ndef train_model(train_data, model=False):\n x = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][\n 0]), 1)\n y = [i[1] for i in train_data]\n if not model:\n model = nn_model(input_size=len(x[0]))\n model.fit({'input': x}, {'targets': y}, n_epoch=5, snapshot_step=500,\n show_metric=True, run_id='openai_learning')\n return model\n\n\ntrain_data = initial_games()\nmodel = train_model(train_data)\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\nfor _ in range(num_final_games):\n total_score = 0\n prev_obs = []\n env.reset()\n for _ in range(target_episodes):\n if len(prev_obs) == 0:\n action = random.randrange(0, 2)\n else:\n action = np.argmax(model.predict(prev_obs.reshape(-1, len(\n prev_obs), 1))[0])\n all_actions.append(action)\n observation, reward, done, info = env.step(action)\n prev_obs = observation\n total_score += reward\n if done:\n break\n all_rewards.append(total_score)\nprint('Average reward:', np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-5": "import gym\nimport random \nimport numpy as np\nimport statistics\nfrom collections import Counter\n\nimport tflearn\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.estimator import regression\n\n#setup the Cartpole environment\nenv = gym.make(\"CartPole-v0\")\nenv.reset()\n\n\n#----------Explore CartPole-------------#\n#exploring the observations, rewards, actions\ndef explore_cartpole():\n\tfor i_episode in range(2):\n\t observation = env.reset()\n\t for t in range(100):\n\t env.render()\n\t print(observation)\n\t action = env.action_space.sample()\n\t observation, reward, done, info = env.step(action)\n\t print(\"Action: \", action, \"Rewards\", reward)\n\t if done:\n\t print(\"Episode finished after {} timesteps\".format(t+1))\n\t break\n\n#explore_cartpole() \n\n#----------Collect Training Data-------------#\n#collect data from successful games by running x games\n#successful would be say, lasting more than 100 frames\nnum_games = 20000\nnum_episodes = 201 #game would end at 200 episodes\nmin_score = 75\n\ndef initial_games():\n\n\ttrain_data = []\n\ttrain_scores = []\n\n\t#running our initial set of games\n\tfor _ in range(num_games):\n\t\tgame_data = []\n\t\tprev_obs = []\n\t\tscore = 0\n\n\t\t#running the game, frame by frame\n\t\tfor _ in range(num_episodes):\n\t\t\t#choosing actions: randomly\n\t\t\taction = random.randrange(0,2)\n\t\t\tobservation, reward, done, info = env.step(action)\n\n\t\t\tif len(prev_obs) > 0: \n\t\t\t\tgame_data.append([prev_obs, action])\n\n\t\t\tprev_obs = observation\n\t\t\tscore += reward\n\n\t\t\tif done:\n\t\t\t\t#print(\"Score was: \", score)\n\t\t\t\tbreak\n\n\t\t#if the score was above the threshold\n\t\t#we will save the game in our training data\n\t\t#hence training on the better games\n\t\tif score >= min_score :\n\t\t\ttrain_scores.append(score)\n\t\t\t#converting the data into one-hot output\t\t\n\t\t\tfor i in game_data:\t\t\t\n\t\t\t\tif i[1] == 
0:\n\t\t\t\t\toutput = [1, 0]\n\t\t\t\telse:\n\t\t\t\t\toutput = [0, 1]\n\t\t\t\t\n\t\t\t\ttrain_data.append([i[0], output])\n\n\t\tenv.reset()\n\n\treturn train_data\n\n\n#----------Build the FC NN model-------------#\n#building a simple multi-layer fully connected model\n#this model can be generally used to play games like cartpole\n#would try training the model on other games in OpenAI environment\n\ndef nn_model(input_size):\n\n network = input_data(shape=[None, input_size, 1], name='input')\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 512, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 256, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 128, activation='relu')\n network = dropout(network, 0.8)\n\n network = fully_connected(network, 2, activation='softmax')\n network = regression(network, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets')\n model = tflearn.DNN(network, tensorboard_dir='log')\n\n return model\n\n\n\n#----------Train the model-------------#\ndef train_model(train_data, model=False):\n\n\tx = np.array([i[0] for i in train_data]).reshape(-1, len(train_data[0][0]),1)\n\ty = [i[1] for i in train_data]\n\n\tif not model:\n\t\tmodel = nn_model(input_size = len(x[0]))\n\n\tmodel.fit({'input': x}, {'targets': y}, n_epoch = 5, snapshot_step=500, \n\t\tshow_metric = True, run_id = 'openai_learning')\n\treturn model\n\ntrain_data = initial_games()\n#print(\"Size of training data\",len(train_data))\n\nmodel = train_model(train_data)\n\n#----------Predict actions for the games-------------#\nnum_final_games = 10\ntarget_episodes = 201\nall_rewards = []\nall_actions = []\n\nfor _ in range(num_final_games):\n\ttotal_score = 0\n\tprev_obs = 
[]\n\tenv.reset()\n\n\tfor _ in range(target_episodes):\n\n\t\t#env.render()\n\n\t\t#instead of randomly choosing the action, predict the actions\n\t\tif len(prev_obs) == 0:\n\t\t\taction = random.randrange(0,2)\n\t\telse:\n\t\t\taction = np.argmax(model.predict(prev_obs.reshape(-1,len(prev_obs),1))[0])\n\t\t\n\t\tall_actions.append(action)\n\n\t\t#let's run the game\n\t\tobservation, reward, done, info = env.step(action)\n\t\t\n\t\tprev_obs = observation\n\t\ttotal_score += reward\n\n\t\tif done: \n\t\t\tbreak\n\n\tall_rewards.append(total_score)\n\n#----------Print results-------------#\nprint('Average reward:',np.mean(all_rewards), '+-', np.std(all_rewards))\nprint('Max reward:', max(all_rewards))\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def load_img(path_to_img):
    """Read an image file into a float32 RGB tensor with a batch axis."""
    raw = tf.io.read_file(path_to_img)
    decoded = tf.io.decode_image(raw, channels=3)
    as_float = tf.image.convert_image_dtype(decoded, tf.float32)
    return as_float[tf.newaxis, :]
def preprocess_image(image, target_dim):
    """Resize so the shorter side equals target_dim, then center-crop to a
    target_dim x target_dim square (assumes a batched HWC image tensor)."""
    hw = tf.cast(tf.shape(image)[1:-1], tf.float32)
    shorter = min(hw)
    resize_factor = target_dim / shorter
    scaled_hw = tf.cast(hw * resize_factor, tf.int32)
    resized = tf.image.resize(image, scaled_hw)
    return tf.image.resize_with_crop_or_pad(resized, target_dim, target_dim)
<|reserved_special_token_0|>
def run_style_predict(preprocessed_style_image):
    """Run the TFLite style-prediction model and return the style
    bottleneck tensor for the given preprocessed style image."""
    interpreter = tf.lite.Interpreter(model_path=style_predict_path)
    interpreter.allocate_tensors()
    in_details = interpreter.get_input_details()
    interpreter.set_tensor(in_details[0]['index'], preprocessed_style_image)
    interpreter.invoke()
    out_index = interpreter.get_output_details()[0]['index']
    return interpreter.tensor(out_index)()
<|reserved_special_token_0|>
def art_grab(term):
    """Fetch an image for `term`, apply the module's random style via the
    TFLite models, and return the stylized image as a numpy array."""
    content_path = image_grabber.im_grab(term, DISP=0)
    content = load_img(content_path)
    style = load_img(style_path)
    prepped_content = preprocess_image(content, 384)
    prepped_style = preprocess_image(style, 256)
    bottleneck = run_style_predict(prepped_style)
    stylized = run_style_transform(bottleneck, prepped_content)
    # Drop the leading batch axis if the model returned one.
    if len(stylized.shape) > 3:
        stylized = tf.squeeze(stylized, axis=0)
    return np.array(stylized)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_img(path_to_img):
img = tf.io.read_file(path_to_img)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
def preprocess_image(image, target_dim):
shape = tf.cast(tf.shape(image)[1:-1], tf.float32)
short_dim = min(shape)
scale = target_dim / short_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)
return image
<|reserved_special_token_0|>
def run_style_predict(preprocessed_style_image):
interpreter = tf.lite.Interpreter(model_path=style_predict_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)
interpreter.invoke()
style_bottleneck = interpreter.tensor(interpreter.get_output_details()[
0]['index'])()
return style_bottleneck
<|reserved_special_token_0|>
def run_style_transform(style_bottleneck, preprocessed_content_image):
    """Blend the style bottleneck into the content image with the TFLite
    transform model; return the stylized image tensor."""
    interpreter = tf.lite.Interpreter(model_path=style_transform_path)
    # NOTE: input details are fetched before allocate_tensors(), matching
    # the original call order.
    in_details = interpreter.get_input_details()
    interpreter.allocate_tensors()
    interpreter.set_tensor(in_details[0]['index'], preprocessed_content_image)
    interpreter.set_tensor(in_details[1]['index'], style_bottleneck)
    interpreter.invoke()
    out_index = interpreter.get_output_details()[0]['index']
    return interpreter.tensor(out_index)()
def art_grab(term):
content_path = image_grabber.im_grab(term, DISP=0)
content_image = load_img(content_path)
style_image = load_img(style_path)
preprocessed_content_image = preprocess_image(content_image, 384)
preprocessed_style_image = preprocess_image(style_image, 256)
style_bottleneck = run_style_predict(preprocessed_style_image)
stylized_image = run_style_transform(style_bottleneck,
preprocessed_content_image)
if len(stylized_image.shape) > 3:
stylized_image = tf.squeeze(stylized_image, axis=0)
stylized_image = np.array(stylized_image)
return stylized_image
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').setLevel(logging.FATAL)
<|reserved_special_token_0|>
print(tf.__version__)
<|reserved_special_token_0|>
mpl.rcParams['figure.figsize'] = 12, 12
mpl.rcParams['axes.grid'] = False
<|reserved_special_token_0|>
paths = ['data/style01.jpg', 'data/style02.jpg', 'data/style03.jpg']
style_path = random.choice(paths)
style_predict_path = tf.keras.utils.get_file('style_predict.tflite',
'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite'
)
style_transform_path = tf.keras.utils.get_file('style_transform.tflite',
'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite'
)
<|reserved_special_token_0|>
def load_img(path_to_img):
img = tf.io.read_file(path_to_img)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
def preprocess_image(image, target_dim):
shape = tf.cast(tf.shape(image)[1:-1], tf.float32)
short_dim = min(shape)
scale = target_dim / short_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)
return image
<|reserved_special_token_0|>
def run_style_predict(preprocessed_style_image):
interpreter = tf.lite.Interpreter(model_path=style_predict_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)
interpreter.invoke()
style_bottleneck = interpreter.tensor(interpreter.get_output_details()[
0]['index'])()
return style_bottleneck
<|reserved_special_token_0|>
def run_style_transform(style_bottleneck, preprocessed_content_image):
interpreter = tf.lite.Interpreter(model_path=style_transform_path)
input_details = interpreter.get_input_details()
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'],
preprocessed_content_image)
interpreter.set_tensor(input_details[1]['index'], style_bottleneck)
interpreter.invoke()
stylized_image = interpreter.tensor(interpreter.get_output_details()[0]
['index'])()
return stylized_image
def art_grab(term):
content_path = image_grabber.im_grab(term, DISP=0)
content_image = load_img(content_path)
style_image = load_img(style_path)
preprocessed_content_image = preprocess_image(content_image, 384)
preprocessed_style_image = preprocess_image(style_image, 256)
style_bottleneck = run_style_predict(preprocessed_style_image)
stylized_image = run_style_transform(style_bottleneck,
preprocessed_content_image)
if len(stylized_image.shape) > 3:
stylized_image = tf.squeeze(stylized_image, axis=0)
stylized_image = np.array(stylized_image)
return stylized_image
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
print(tf.__version__)
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = 12, 12
mpl.rcParams['axes.grid'] = False
import numpy as np
import time
import functools
import image_grabber
import def_grabber
import cv2
import random
<|reserved_special_token_0|>
paths = ['data/style01.jpg', 'data/style02.jpg', 'data/style03.jpg']
style_path = random.choice(paths)
style_predict_path = tf.keras.utils.get_file('style_predict.tflite',
'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite'
)
style_transform_path = tf.keras.utils.get_file('style_transform.tflite',
'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite'
)
<|reserved_special_token_0|>
def load_img(path_to_img):
img = tf.io.read_file(path_to_img)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
def preprocess_image(image, target_dim):
shape = tf.cast(tf.shape(image)[1:-1], tf.float32)
short_dim = min(shape)
scale = target_dim / short_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)
return image
<|reserved_special_token_0|>
def run_style_predict(preprocessed_style_image):
interpreter = tf.lite.Interpreter(model_path=style_predict_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)
interpreter.invoke()
style_bottleneck = interpreter.tensor(interpreter.get_output_details()[
0]['index'])()
return style_bottleneck
<|reserved_special_token_0|>
def run_style_transform(style_bottleneck, preprocessed_content_image):
interpreter = tf.lite.Interpreter(model_path=style_transform_path)
input_details = interpreter.get_input_details()
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'],
preprocessed_content_image)
interpreter.set_tensor(input_details[1]['index'], style_bottleneck)
interpreter.invoke()
stylized_image = interpreter.tensor(interpreter.get_output_details()[0]
['index'])()
return stylized_image
def art_grab(term):
content_path = image_grabber.im_grab(term, DISP=0)
content_image = load_img(content_path)
style_image = load_img(style_path)
preprocessed_content_image = preprocess_image(content_image, 384)
preprocessed_style_image = preprocess_image(style_image, 256)
style_bottleneck = run_style_predict(preprocessed_style_image)
stylized_image = run_style_transform(style_bottleneck,
preprocessed_content_image)
if len(stylized_image.shape) > 3:
stylized_image = tf.squeeze(stylized_image, axis=0)
stylized_image = np.array(stylized_image)
return stylized_image
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""overview.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/examples/style_transfer/overview.ipynb
##### Copyright 2019 The TensorFlow Authors.
"""
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
import tensorflow as tf
print(tf.__version__)
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import time
import functools
import image_grabber
import def_grabber
import cv2
import random
"""Download the content and style images, and the pre-trained TensorFlow Lite models."""
# Candidate style images bundled with the app; one is picked at random
# each time the module is imported.
paths = ["data/style01.jpg", "data/style02.jpg", "data/style03.jpg"]
style_path = random.choice(paths)
# Download (and locally cache) the pre-trained int8 TFLite models from
# TF Hub: one for style prediction, one for style transfer.
style_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')
style_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')
"""## Pre-process the inputs
* The content image and the style image must be RGB images with pixel values being float32 numbers between [0..1].
* The style image size must be (1, 256, 256, 3). We central crop the image and resize it.
* The content image must be (1, 384, 384, 3). We central crop the image and resize it.
"""
# Function to load an image from a file, and add a batch dimension.
def load_img(path_to_img):
  """Read an image file into a float32 tensor with a leading batch axis.

  Pixel values are scaled into [0, 1]; output shape is (1, H, W, 3).
  """
  raw = tf.io.read_file(path_to_img)
  decoded = tf.io.decode_image(raw, channels=3)
  scaled = tf.image.convert_image_dtype(decoded, tf.float32)
  # Prepend the batch dimension the TFLite models expect.
  return scaled[tf.newaxis, :]
# Function to pre-process by resizing and centrally cropping the image.
def preprocess_image(image, target_dim):
  """Scale *image* so its shorter side equals target_dim, then center-crop.

  Expects a batched (1, H, W, C) tensor; returns a
  (1, target_dim, target_dim, C) tensor.
  """
  hw = tf.cast(tf.shape(image)[1:-1], tf.float32)
  # Factor that maps the shorter spatial dimension onto target_dim.
  factor = target_dim / min(hw)
  resized = tf.image.resize(image, tf.cast(hw * factor, tf.int32))
  # Crop (or pad) to the exact square the model expects.
  return tf.image.resize_with_crop_or_pad(resized, target_dim, target_dim)
"""## Run style transfer with TensorFlow Lite
### Style prediction
"""
# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
  """Compute the style bottleneck for a preprocessed (1, 256, 256, 3) image."""
  interpreter = tf.lite.Interpreter(model_path=style_predict_path)
  interpreter.allocate_tensors()
  # Single input: the preprocessed style image.
  interpreter.set_tensor(
      interpreter.get_input_details()[0]["index"], preprocessed_style_image)
  interpreter.invoke()
  # Read back the bottleneck tensor produced by the prediction model.
  out_index = interpreter.get_output_details()[0]["index"]
  return interpreter.tensor(out_index)()
"""### Style transform"""
# Run style transform on preprocessed style image
def run_style_transform(style_bottleneck, preprocessed_content_image):
  """Stylize a preprocessed (1, 384, 384, 3) content image.

  Combines the content image with a style bottleneck from
  run_style_predict() and returns the stylized image tensor.
  """
  interpreter = tf.lite.Interpreter(model_path=style_transform_path)
  inputs = interpreter.get_input_details()
  interpreter.allocate_tensors()
  # Input 0 is the content image; input 1 is the style bottleneck.
  interpreter.set_tensor(inputs[0]["index"], preprocessed_content_image)
  interpreter.set_tensor(inputs[1]["index"], style_bottleneck)
  interpreter.invoke()
  # Fetch the stylized output tensor.
  return interpreter.tensor(interpreter.get_output_details()[0]["index"])()
def art_grab(term):
  """Fetch an image for *term* and return it stylized as a numpy array.

  Grabs a content image via image_grabber, runs style prediction on the
  module's randomly chosen style image, applies the style transfer model,
  and returns an (H, W, 3) numpy array.
  """
  content_path = image_grabber.im_grab(term, DISP=0)
  # Load and resize both inputs to the fixed sizes the models require.
  content = preprocess_image(load_img(content_path), 384)
  style = preprocess_image(load_img(style_path), 256)
  # Style prediction followed by style transfer.
  bottleneck = run_style_predict(style)
  stylized = run_style_transform(bottleneck, content)
  # Remove the batch axis, if present, before handing back a plain array.
  if len(stylized.shape) > 3:
    stylized = tf.squeeze(stylized, axis=0)
  return np.array(stylized)
|
flexible
|
{
"blob_id": "36ce0de4cb760632959392a9f982532436bd37b0",
"index": 7272,
"step-1": "<mask token>\n\n\ndef load_img(path_to_img):\n img = tf.io.read_file(path_to_img)\n img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = img[tf.newaxis, :]\n return img\n\n\ndef preprocess_image(image, target_dim):\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\n short_dim = min(shape)\n scale = target_dim / short_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n image = tf.image.resize(image, new_shape)\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\n return image\n\n\n<mask token>\n\n\ndef run_style_predict(preprocessed_style_image):\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)\n interpreter.invoke()\n style_bottleneck = interpreter.tensor(interpreter.get_output_details()[\n 0]['index'])()\n return style_bottleneck\n\n\n<mask token>\n\n\ndef art_grab(term):\n content_path = image_grabber.im_grab(term, DISP=0)\n content_image = load_img(content_path)\n style_image = load_img(style_path)\n preprocessed_content_image = preprocess_image(content_image, 384)\n preprocessed_style_image = preprocess_image(style_image, 256)\n style_bottleneck = run_style_predict(preprocessed_style_image)\n stylized_image = run_style_transform(style_bottleneck,\n preprocessed_content_image)\n if len(stylized_image.shape) > 3:\n stylized_image = tf.squeeze(stylized_image, axis=0)\n stylized_image = np.array(stylized_image)\n return stylized_image\n",
"step-2": "<mask token>\n\n\ndef load_img(path_to_img):\n img = tf.io.read_file(path_to_img)\n img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = img[tf.newaxis, :]\n return img\n\n\ndef preprocess_image(image, target_dim):\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\n short_dim = min(shape)\n scale = target_dim / short_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n image = tf.image.resize(image, new_shape)\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\n return image\n\n\n<mask token>\n\n\ndef run_style_predict(preprocessed_style_image):\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)\n interpreter.invoke()\n style_bottleneck = interpreter.tensor(interpreter.get_output_details()[\n 0]['index'])()\n return style_bottleneck\n\n\n<mask token>\n\n\ndef run_style_transform(style_bottleneck, preprocessed_content_image):\n interpreter = tf.lite.Interpreter(model_path=style_transform_path)\n input_details = interpreter.get_input_details()\n interpreter.allocate_tensors()\n interpreter.set_tensor(input_details[0]['index'],\n preprocessed_content_image)\n interpreter.set_tensor(input_details[1]['index'], style_bottleneck)\n interpreter.invoke()\n stylized_image = interpreter.tensor(interpreter.get_output_details()[0]\n ['index'])()\n return stylized_image\n\n\ndef art_grab(term):\n content_path = image_grabber.im_grab(term, DISP=0)\n content_image = load_img(content_path)\n style_image = load_img(style_path)\n preprocessed_content_image = preprocess_image(content_image, 384)\n preprocessed_style_image = preprocess_image(style_image, 256)\n style_bottleneck = run_style_predict(preprocessed_style_image)\n stylized_image = run_style_transform(style_bottleneck,\n preprocessed_content_image)\n if 
len(stylized_image.shape) > 3:\n stylized_image = tf.squeeze(stylized_image, axis=0)\n stylized_image = np.array(stylized_image)\n return stylized_image\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\n<mask token>\nprint(tf.__version__)\n<mask token>\nmpl.rcParams['figure.figsize'] = 12, 12\nmpl.rcParams['axes.grid'] = False\n<mask token>\npaths = ['data/style01.jpg', 'data/style02.jpg', 'data/style03.jpg']\nstyle_path = random.choice(paths)\nstyle_predict_path = tf.keras.utils.get_file('style_predict.tflite',\n 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite'\n )\nstyle_transform_path = tf.keras.utils.get_file('style_transform.tflite',\n 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite'\n )\n<mask token>\n\n\ndef load_img(path_to_img):\n img = tf.io.read_file(path_to_img)\n img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = img[tf.newaxis, :]\n return img\n\n\ndef preprocess_image(image, target_dim):\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\n short_dim = min(shape)\n scale = target_dim / short_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n image = tf.image.resize(image, new_shape)\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\n return image\n\n\n<mask token>\n\n\ndef run_style_predict(preprocessed_style_image):\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)\n interpreter.invoke()\n style_bottleneck = interpreter.tensor(interpreter.get_output_details()[\n 0]['index'])()\n return style_bottleneck\n\n\n<mask token>\n\n\ndef run_style_transform(style_bottleneck, preprocessed_content_image):\n interpreter = tf.lite.Interpreter(model_path=style_transform_path)\n input_details = interpreter.get_input_details()\n 
interpreter.allocate_tensors()\n interpreter.set_tensor(input_details[0]['index'],\n preprocessed_content_image)\n interpreter.set_tensor(input_details[1]['index'], style_bottleneck)\n interpreter.invoke()\n stylized_image = interpreter.tensor(interpreter.get_output_details()[0]\n ['index'])()\n return stylized_image\n\n\ndef art_grab(term):\n content_path = image_grabber.im_grab(term, DISP=0)\n content_image = load_img(content_path)\n style_image = load_img(style_path)\n preprocessed_content_image = preprocess_image(content_image, 384)\n preprocessed_style_image = preprocess_image(style_image, 256)\n style_bottleneck = run_style_predict(preprocessed_style_image)\n stylized_image = run_style_transform(style_bottleneck,\n preprocessed_content_image)\n if len(stylized_image.shape) > 3:\n stylized_image = tf.squeeze(stylized_image, axis=0)\n stylized_image = np.array(stylized_image)\n return stylized_image\n",
"step-4": "<mask token>\nimport logging\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\nimport tensorflow as tf\nprint(tf.__version__)\nimport IPython.display as display\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['figure.figsize'] = 12, 12\nmpl.rcParams['axes.grid'] = False\nimport numpy as np\nimport time\nimport functools\nimport image_grabber\nimport def_grabber\nimport cv2\nimport random\n<mask token>\npaths = ['data/style01.jpg', 'data/style02.jpg', 'data/style03.jpg']\nstyle_path = random.choice(paths)\nstyle_predict_path = tf.keras.utils.get_file('style_predict.tflite',\n 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite'\n )\nstyle_transform_path = tf.keras.utils.get_file('style_transform.tflite',\n 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite'\n )\n<mask token>\n\n\ndef load_img(path_to_img):\n img = tf.io.read_file(path_to_img)\n img = tf.io.decode_image(img, channels=3)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = img[tf.newaxis, :]\n return img\n\n\ndef preprocess_image(image, target_dim):\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\n short_dim = min(shape)\n scale = target_dim / short_dim\n new_shape = tf.cast(shape * scale, tf.int32)\n image = tf.image.resize(image, new_shape)\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\n return image\n\n\n<mask token>\n\n\ndef run_style_predict(preprocessed_style_image):\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], preprocessed_style_image)\n interpreter.invoke()\n style_bottleneck = interpreter.tensor(interpreter.get_output_details()[\n 0]['index'])()\n return 
style_bottleneck\n\n\n<mask token>\n\n\ndef run_style_transform(style_bottleneck, preprocessed_content_image):\n interpreter = tf.lite.Interpreter(model_path=style_transform_path)\n input_details = interpreter.get_input_details()\n interpreter.allocate_tensors()\n interpreter.set_tensor(input_details[0]['index'],\n preprocessed_content_image)\n interpreter.set_tensor(input_details[1]['index'], style_bottleneck)\n interpreter.invoke()\n stylized_image = interpreter.tensor(interpreter.get_output_details()[0]\n ['index'])()\n return stylized_image\n\n\ndef art_grab(term):\n content_path = image_grabber.im_grab(term, DISP=0)\n content_image = load_img(content_path)\n style_image = load_img(style_path)\n preprocessed_content_image = preprocess_image(content_image, 384)\n preprocessed_style_image = preprocess_image(style_image, 256)\n style_bottleneck = run_style_predict(preprocessed_style_image)\n stylized_image = run_style_transform(style_bottleneck,\n preprocessed_content_image)\n if len(stylized_image.shape) > 3:\n stylized_image = tf.squeeze(stylized_image, axis=0)\n stylized_image = np.array(stylized_image)\n return stylized_image\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"overview.ipynb\r\n\r\nAutomatically generated by Colaboratory.\r\n\r\nOriginal file is located at\r\n https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/examples/style_transfer/overview.ipynb\r\n\r\n##### Copyright 2019 The TensorFlow Authors.\r\n\"\"\"\r\n\r\n#@title Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\nimport logging\r\nimport os\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL\r\nlogging.getLogger('tensorflow').setLevel(logging.FATAL)\r\n\r\nimport tensorflow as tf\r\nprint(tf.__version__)\r\n\r\nimport IPython.display as display\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nmpl.rcParams['figure.figsize'] = (12,12)\r\nmpl.rcParams['axes.grid'] = False\r\n\r\nimport numpy as np\r\nimport time\r\nimport functools\r\n\r\nimport image_grabber\r\nimport def_grabber\r\nimport cv2\r\n\r\nimport random\r\n\r\n\r\n\"\"\"Download the content and style images, and the pre-trained TensorFlow Lite models.\"\"\"\r\npaths = [\"data/style01.jpg\", \"data/style02.jpg\", \"data/style03.jpg\"]\r\nstyle_path = random.choice(paths)\r\n\r\nstyle_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')\r\nstyle_transform_path = tf.keras.utils.get_file('style_transform.tflite', 
'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')\r\n\r\n\"\"\"## Pre-process the inputs\r\n\r\n* The content image and the style image must be RGB images with pixel values being float32 numbers between [0..1].\r\n* The style image size must be (1, 256, 256, 3). We central crop the image and resize it.\r\n* The content image must be (1, 384, 384, 3). We central crop the image and resize it.\r\n\"\"\"\r\n\r\n# Function to load an image from a file, and add a batch dimension.\r\ndef load_img(path_to_img):\r\n img = tf.io.read_file(path_to_img)\r\n img = tf.io.decode_image(img, channels=3)\r\n img = tf.image.convert_image_dtype(img, tf.float32)\r\n img = img[tf.newaxis, :]\r\n\r\n return img\r\n\r\n# Function to pre-process by resizing an central cropping it.\r\ndef preprocess_image(image, target_dim):\r\n # Resize the image so that the shorter dimension becomes 256px.\r\n shape = tf.cast(tf.shape(image)[1:-1], tf.float32)\r\n short_dim = min(shape)\r\n scale = target_dim / short_dim\r\n new_shape = tf.cast(shape * scale, tf.int32)\r\n image = tf.image.resize(image, new_shape)\r\n\r\n # Central crop the image.\r\n image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)\r\n\r\n return image\r\n\r\n\"\"\"## Run style transfer with TensorFlow Lite\r\n\r\n### Style prediction\r\n\"\"\"\r\n# Function to run style prediction on preprocessed style image.\r\ndef run_style_predict(preprocessed_style_image):\r\n # Load the model.\r\n interpreter = tf.lite.Interpreter(model_path=style_predict_path)\r\n\r\n # Set model input.\r\n interpreter.allocate_tensors()\r\n input_details = interpreter.get_input_details()\r\n interpreter.set_tensor(input_details[0][\"index\"], preprocessed_style_image)\r\n\r\n # Calculate style bottleneck.\r\n interpreter.invoke()\r\n style_bottleneck = interpreter.tensor(\r\n interpreter.get_output_details()[0][\"index\"]\r\n )()\r\n\r\n return 
style_bottleneck\r\n\r\n\"\"\"### Style transform\"\"\"\r\n\r\n# Run style transform on preprocessed style image\r\ndef run_style_transform(style_bottleneck, preprocessed_content_image):\r\n # Load the model.\r\n interpreter = tf.lite.Interpreter(model_path=style_transform_path)\r\n\r\n # Set model input.\r\n input_details = interpreter.get_input_details()\r\n interpreter.allocate_tensors()\r\n\r\n # Set model inputs.\r\n interpreter.set_tensor(input_details[0][\"index\"], preprocessed_content_image)\r\n interpreter.set_tensor(input_details[1][\"index\"], style_bottleneck)\r\n interpreter.invoke()\r\n\r\n # Transform content image.\r\n stylized_image = interpreter.tensor(\r\n interpreter.get_output_details()[0][\"index\"]\r\n )()\r\n\r\n return stylized_image\r\n\r\ndef art_grab(term):\r\n content_path = image_grabber.im_grab(term, DISP=0)\r\n\r\n # Load the input images.\r\n content_image = load_img(content_path)\r\n style_image = load_img(style_path)\r\n\r\n # Preprocess the input images.\r\n preprocessed_content_image = preprocess_image(content_image, 384)\r\n preprocessed_style_image = preprocess_image(style_image, 256)\r\n\r\n # Calculate style bottleneck for the preprocessed style image.\r\n style_bottleneck = run_style_predict(preprocessed_style_image)\r\n\r\n # Stylize the content image using the style bottleneck.\r\n stylized_image = run_style_transform(style_bottleneck, preprocessed_content_image)\r\n\r\n # Visualize the output.\r\n #imshow(stylized_image, 'Stylized Image')\r\n if len(stylized_image.shape) > 3:\r\n stylized_image = tf.squeeze(stylized_image, axis=0)\r\n stylized_image = np.array(stylized_image)\r\n\r\n return stylized_image\r\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
import ATLAS1
import ATLAS_v2
from atlas.config import dbConfig
import pandas as pd
import ContentCategories
import NgramMapping
import SentimentAnalysis_2
import TrigDriv_2
import TopicModeling
import logging
import traceback
from StringIO import StringIO
from atlas.models import Requests
def caller_file(full_data_dict):
    """Run the full analysis pipeline over an uploaded data file.

    Expected keys in full_data_dict (per the reads below):
      - 'filename_obj': request keyword/identifier
      - 'file_data':    raw CSV contents of the uploaded file
      - 'tag_dict', 'senti_dict', 'td_dict': dictionaries driving the
        tagging, sentiment and trigger/driver stages
    Progress milestones (15/35/50/65/80%, then 'Complete') are saved on
    the Requests DB model; when the DB save fails, the status is instead
    written into the request CSV (df), which is flushed to disk at the end.
    Returns the request keyword.

    NOTE(review): the bare `except:` blocks deliberately keep the pipeline
    going past per-stage failures; `df.ix` is a long-removed pandas
    indexer -- confirm the pinned pandas version supports it.
    """
    #print(full_data_dict)
    request = full_data_dict['filename_obj']
    print("Entering File analysis", request)
    filecontents = full_data_dict['file_data']
    # print("filecontents:", filecontents)
    # tag_dict = full_data_dict['tag_dict']
    #db = pymongo.MongoClient().atlas
    #s = request.encode('utf-8')
    df = pd.read_csv(dbConfig.dict["requestUrl"], encoding='utf-8')
    status_dict = {'status': None, "senti_list": None, 'td_list': None}
    print("going to read file contents into df.")
    file_contents_df = pd.read_csv(StringIO(filecontents), encoding='utf-8')
    print("file contents read into df.")
    # Files that already carry a pCategory column take the ATLAS1.main2 /
    # *.main2 path; otherwise the *.main3 variants are used below.
    if "pCategory" in file_contents_df.columns.values.tolist():
        print("Calling Atlas1.main2()")
        status = ATLAS1.main2(request, filecontents, full_data_dict['tag_dict'])
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '15% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
            df.ix[(df.reqKw == request), 'reqStatus'] = "15% complete"
        # file_dict = {
        #     '_id': binascii.hexlify(s),
        #     'Product': request,
        #
        #     'metadata': {
        #         '_id': binascii.hexlify(s),
        #         'lastUpdated': datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M:%S %p"),
        #         'name': request
        #     },
        #     'analyticData': {
        #         'sentimentData': [
        #
        #         ],
        #         'trigdrivData': {
        #
        #         }
        #     }
        # }
        # result = db.data.insert_one(file_dict)
        # sent_list = SentimentAPI_generic.senti_main(dbConfig.dict['uploadsUrl'] + request, ',')
        # print sent_list
        #
        # target_string = "analyticData.sentimentData"
        #
        # db.data.update({"_id": binascii.hexlify(s)}, {"$set": {target_string: sent_list[0]}})
        # print result.inserted_id
        # Calling analyses files - sentiment, trigger/driver and topic modelling
        try:
            print("Now classifying content categories")
            cc_list = ContentCategories.main(request)
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '35% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "35% complete"
        except:
            print("Error while classifying content categories")
            print(traceback.print_exc())
        # Calling analyses files - sentiment, trigger/driver and topic modelling
        try:
            print("Now tagging the dataset")
            tagop_list = NgramMapping.main2(request, full_data_dict['tag_dict'])
            #tagop_list = NgramMapping.main2("headphones", full_data_dict['tag_dict'])
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '50% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "50% complete"
        except:
            print("Error while tagging dataset with dictionary")
            print(traceback.print_exc())
        try:
            print("Calling sentiment analyses to run on uploaded file...")
            sent_list = SentimentAnalysis_2.senti_main2(request, filecontents, full_data_dict['senti_dict'])
            #print sent_list
            print("Sentiment data inserted into DB")
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '65% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "65% complete"
        except:
            print("Error while analysing sentiment")
            #print(traceback.print_exc())
        try:
            td_list = TrigDriv_2.td_main2(request, full_data_dict['td_dict'])
            #print td_list
            print("TriggerDriver data inserted into DB")
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '80% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "80% complete"
        except:
            print("Error while analysing triggers/drivers")
            #print(traceback.print_exc())
    else:
        print("Calling Atlas1.main3()")
        # if 'supplements_10k_1' not in request:
        status = ATLAS1.main3(request, filecontents, full_data_dict['tag_dict'])
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '15% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
            df.ix[(df.reqKw == request), 'reqStatus'] = "15% complete"
        # Calling analyses files - sentiment, trigger/driver and topic modelling
        try:
            print("Now classifying content categories")
            cc_list = ContentCategories.main(request)
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '35% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "35% complete"
        except:
            print("Error while classifying content categories")
            print(traceback.print_exc())
        # Calling analyses files - sentiment, trigger/driver and topic modelling
        try:
            print("Now tagging the dataset with the dictionary provided")
            tagop_list = NgramMapping.main3(request, full_data_dict['file_data'], full_data_dict['tag_dict'])
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '50% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "50% complete"
        except:
            print("Error while tagging dataset with dictionary")
            print(traceback.print_exc())
        try:
            print("Calling sentiment analyses to run on uploaded file...")
            sent_list = SentimentAnalysis_2.senti_main3(request, filecontents, full_data_dict['senti_dict'])
            # print sent_list
            print("Sentiment data inserted into DB")
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '65% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "65% complete"
        except:
            print("Error while analysing sentiment")
            # print(traceback.print_exc())
        try:
            td_list = TrigDriv_2.td_main3(request, full_data_dict['td_dict'])
            # print td_list
            print("TriggerDriver data inserted into DB")
            try:
                req_obj = Requests.objects.get(reqKw=request)
                req_obj.reqStatus = '80% complete'
                req_obj.save()
            except:
                print("Couldn't save status update in DB!")
                print(traceback.print_exc())
                df.ix[(df.reqKw == request), 'reqStatus'] = "80% complete"
        except:
            print("Error while analysing triggers/drivers")
            # print(traceback.print_exc())
        # else:
        #     try:
        #         print("Now tagging the supplements dataset with the dictionary provided")
        #         tagop_list = NgramMapping.main3(request, full_data_dict['file_data'], full_data_dict['tag_dict'])
        #     except:
        #         print("Error while tagging supplement dataset with dictionary")
        #         print(traceback.print_exc())
    # NOTE(review): Python 2 print statement; the rest of this function
    # uses print() -- this file only runs under Python 2.
    print "Going to topic model"
    # Performing Topic Modeling Analysis
    num_topics = 8
    topic_status = TopicModeling.main(request, num_topics)
    try:
        req_obj = Requests.objects.get(reqKw=request)
        req_obj.reqStatus = 'Complete'
        req_obj.save()
    except:
        print("Couldn't save status update in DB!")
        print(traceback.print_exc())
        df.ix[(df.reqKw == request), 'reqStatus'] = "Complete"
    # if status == 200 and sent_list == 200 and td_list == 200 and topic_status == 200:
    #     # Update request csv status to completed
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Completed"
    # elif status == 200 and sent_list == 200 and td_list == 200:
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Topic Modelling Failed"
    # elif status == 200 and sent_list == 200:
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Trigger/Driver Failed"
    # elif status == 200:
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Sentiment Failed"
    # else:
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Scraping incomplete"
    # Persist any CSV-fallback status updates made above.
    with open(dbConfig.dict["requestUrl"], 'w') as f:
        df.to_csv(f, index=False)
    print("Exiting return")
    return request
def caller(request, site, full_data_dict):
    """Scrape `site` for `request` and run the full analysis pipeline.

    Stages: ATLAS_v2 scrape -> content categories -> n-gram tagging ->
    sentiment -> trigger/driver -> topic modeling.  Progress milestones
    (15/35/50/65/80%, then 'Complete') are saved on the Requests DB model;
    each stage failure is logged and the pipeline continues.
    Returns the request keyword.

    NOTE(review): bare `except:` blocks are used throughout to keep the
    pipeline running past per-stage failures.
    """
    print(full_data_dict['tag_dict'])  # dict with default dict urls for automatic scraped data tagging
    print("Entering", request, site)
    # df = pd.read_csv(dbConfig.dict["requestUrl"], encoding='utf-8')
    # db = pymongo.MongoClient().atlas
    # s = request.encode('utf-8')
    status = ATLAS_v2.main(request, site)
    print("Atlas main finish")
    try:
        req_obj = Requests.objects.get(reqKw=request)
        req_obj.reqStatus = '15% complete'
        req_obj.save()
    except:
        print("Couldn't save status update in DB!")
        print(traceback.print_exc())
    # df.ix[(df.reqKw == request), 'reqStatus'] = "20% complete"
    # Calling analyses files - sentiment, trigger/driver and topic modelling
    try:
        print("Now classifying content categories")
        cc_list = ContentCategories.main(request)
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '35% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
        # df.ix[(df.reqKw == request), 'reqStatus'] = "40% complete"
    except:
        print("Error while classifying content categories!")
        print(traceback.print_exc())
    # Calling analyses files - sentiment, trigger/driver and topic modelling
    try:
        print("Now tagging the dataset...")
        tagop_list = NgramMapping.main(request, full_data_dict['tag_dict'])
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '50% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
        # df.ix[(df.reqKw == request), 'reqStatus'] = "40% complete"
    except:
        print("Error while tagging dataset with dictionary")
        print(traceback.print_exc())
    try:
        sent_list = SentimentAnalysis_2.senti_main(request)
        #print sent_list
        print("Sentiment data inserted into DB")
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '65% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
        # df.ix[(df.reqKw == request), 'reqStatus'] = "60% complete"
    except:
        print("Error while analysing sentiment")
        print(traceback.print_exc())
    try:
        td_list = TrigDriv_2.td_main(request)
        #print td_list
        print("TriggerDriver data inserted into DB")
        try:
            req_obj = Requests.objects.get(reqKw=request)
            req_obj.reqStatus = '80% complete'
            req_obj.save()
        except:
            print("Couldn't save status update in DB!")
            print(traceback.print_exc())
        # df.ix[(df.reqKw == request), 'reqStatus'] = "80% complete"
    except:
        print("Error while analysing triggers/drivers")
        print(traceback.print_exc())
    # NOTE(review): Python 2 print statement; the rest of this function
    # uses print() -- this file only runs under Python 2.
    print "Going to topic model"
    #logging.info("going to topicmodeling.main")
    #
    #Performing Topic Modeling Analysis
    num_topics = 8
    topic_status = TopicModeling.main(request, num_topics)
    # df = pd.read_csv(dbConfig.dict["requestUrl"], encoding='utf-8')
    # if status == 200 & sent_list[1] == 200 & topic_status == 200:
    #     # Update request csv status to completed
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Completed"
    # else:
    #     df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = "Failed"
    try:
        req_obj = Requests.objects.get(reqKw=request)
        req_obj.reqStatus = 'Complete'
        req_obj.save()
    except:
        print("Couldn't save status update in DB!")
        print(traceback.print_exc())
    # df.ix[(df.reqKw == request), 'reqStatus'] = "Complete"
    # with open(dbConfig.dict["requestUrl"], 'w') as f:
    #     df.to_csv(f, index=False)
    print("Exiting Return")
    return request
# Module-level process pool shared by the pool_exe* entry points.
pool = ProcessPoolExecutor()
def pool_exe(request, site, full_data_dict):  # to Rev
    """Run the scrape-and-analyse pipeline for `request` asynchronously.

    Submits caller() to the shared process pool and returns the Future so
    callers can wait on completion or surface worker exceptions.  (The
    original discarded the Future, so any error raised inside caller()
    was silently lost.)
    """
    future = pool.submit(caller, request, site, full_data_dict)
    print("Exit pool exe\n")
    return future
#def pool_exe_file(request,filecontents):
# future = pool.submit(caller_file, request, filecontents)
# print("Exit file pool exe\n")
def pool_exe_file(full_data_dict):  # entry point for the Upl and Soc routes
    """Fire-and-forget: run caller_file(full_data_dict) on the process pool."""
    # Result deliberately ignored; the worker persists status updates itself.
    pool.submit(caller_file, full_data_dict)
    print("Exit file pool exe\n")
|
normal
|
{
"blob_id": "41698e9d8349ddf3f42aa3d4fc405c69077d1aa3",
"index": 3160,
"step-1": "from concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import ProcessPoolExecutor\nimport ATLAS1\nimport ATLAS_v2\nfrom atlas.config import dbConfig\nimport pandas as pd\nimport ContentCategories\nimport NgramMapping\nimport SentimentAnalysis_2\nimport TrigDriv_2\nimport TopicModeling\nimport logging\nimport traceback\nfrom StringIO import StringIO\nfrom atlas.models import Requests\n\n\ndef caller_file(full_data_dict):\n #print(full_data_dict)\n request = full_data_dict['filename_obj']\n print(\"Entering File analysis\", request)\n filecontents = full_data_dict['file_data']\n # print(\"filecontents:\", filecontents)\n # tag_dict = full_data_dict['tag_dict']\n\n #db = pymongo.MongoClient().atlas\n #s = request.encode('utf-8')\n\n df = pd.read_csv(dbConfig.dict[\"requestUrl\"], encoding='utf-8')\n status_dict = {'status': None, \"senti_list\": None, 'td_list': None}\n print(\"going to read file contents into df.\")\n file_contents_df = pd.read_csv(StringIO(filecontents), encoding='utf-8')\n print(\"file contents read into df.\")\n\n if \"pCategory\" in file_contents_df.columns.values.tolist():\n print(\"Calling Atlas1.main2()\")\n status = ATLAS1.main2(request, filecontents, full_data_dict['tag_dict'])\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '15% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"15% complete\"\n\n # file_dict = {\n # '_id': binascii.hexlify(s),\n # 'Product': request,\n #\n # 'metadata': {\n # '_id': binascii.hexlify(s),\n # 'lastUpdated': datetime.datetime.now().strftime(\"%A, %d. 
%B %Y %I:%M:%S %p\"),\n # 'name': request\n # },\n # 'analyticData': {\n # 'sentimentData': [\n #\n # ],\n # 'trigdrivData': {\n #\n # }\n # }\n # }\n # result = db.data.insert_one(file_dict)\n # sent_list = SentimentAPI_generic.senti_main(dbConfig.dict['uploadsUrl'] + request, ',')\n # print sent_list\n #\n # target_string = \"analyticData.sentimentData\"\n #\n # db.data.update({\"_id\": binascii.hexlify(s)}, {\"$set\": {target_string: sent_list[0]}})\n # print result.inserted_id\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now classifying content categories\")\n cc_list = ContentCategories.main(request)\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '35% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"35% complete\"\n except:\n print(\"Error while classifying content categories\")\n print(traceback.print_exc())\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now tagging the dataset\")\n tagop_list = NgramMapping.main2(request, full_data_dict['tag_dict'])\n #tagop_list = NgramMapping.main2(\"headphones\", full_data_dict['tag_dict'])\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '50% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"50% complete\"\n except:\n print(\"Error while tagging dataset with dictionary\")\n print(traceback.print_exc())\n\n try:\n print(\"Calling sentiment analyses to run on uploaded file...\")\n sent_list = SentimentAnalysis_2.senti_main2(request, filecontents, full_data_dict['senti_dict'])\n #print sent_list\n print(\"Sentiment data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '65% complete'\n req_obj.save()\n 
except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"65% complete\"\n\n except:\n print(\"Error while analysing sentiment\")\n #print(traceback.print_exc())\n\n try:\n td_list = TrigDriv_2.td_main2(request, full_data_dict['td_dict'])\n #print td_list\n print(\"TriggerDriver data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '80% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"80% complete\"\n except:\n print(\"Error while analysing triggers/drivers\")\n #print(traceback.print_exc())\n\n else:\n print(\"Calling Atlas1.main3()\")\n # if 'supplements_10k_1' not in request:\n status = ATLAS1.main3(request, filecontents, full_data_dict['tag_dict'])\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '15% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"15% complete\"\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now classifying content categories\")\n cc_list = ContentCategories.main(request)\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '35% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"35% complete\"\n except:\n print(\"Error while classifying content categories\")\n print(traceback.print_exc())\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now tagging the dataset with the dictionary provided\")\n tagop_list = NgramMapping.main3(request, full_data_dict['file_data'], full_data_dict['tag_dict'])\n try:\n req_obj = 
Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '50% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"50% complete\"\n except:\n print(\"Error while tagging dataset with dictionary\")\n print(traceback.print_exc())\n\n try:\n print(\"Calling sentiment analyses to run on uploaded file...\")\n sent_list = SentimentAnalysis_2.senti_main3(request, filecontents, full_data_dict['senti_dict'])\n # print sent_list\n print(\"Sentiment data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '65% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"65% complete\"\n\n except:\n print(\"Error while analysing sentiment\")\n # print(traceback.print_exc())\n\n try:\n td_list = TrigDriv_2.td_main3(request, full_data_dict['td_dict'])\n # print td_list\n print(\"TriggerDriver data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '80% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"80% complete\"\n except:\n print(\"Error while analysing triggers/drivers\")\n # print(traceback.print_exc())\n # else:\n # try:\n # print(\"Now tagging the supplements dataset with the dictionary provided\")\n # tagop_list = NgramMapping.main3(request, full_data_dict['file_data'], full_data_dict['tag_dict'])\n # except:\n # print(\"Error while tagging supplement dataset with dictionary\")\n # print(traceback.print_exc())\n\n print \"Going to topic model\"\n # Performing Topic Modeling Analysis\n num_topics = 8\n topic_status = TopicModeling.main(request, num_topics)\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = 'Complete'\n 
req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n df.ix[(df.reqKw == request), 'reqStatus'] = \"Complete\"\n\n # if status == 200 and sent_list == 200 and td_list == 200 and topic_status == 200:\n # # Update request csv status to completed\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Completed\"\n # elif status == 200 and sent_list == 200 and td_list == 200:\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Topic Modelling Failed\"\n # elif status == 200 and sent_list == 200:\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Trigger/Driver Failed\"\n # elif status == 200:\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Sentiment Failed\"\n # else:\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Scraping incomplete\"\n\n with open(dbConfig.dict[\"requestUrl\"], 'w') as f:\n df.to_csv(f, index=False)\n\n print(\"Exiting return\")\n return request\n\n\ndef caller(request, site, full_data_dict):\n print(full_data_dict['tag_dict']) # dict with default dict urls for automatic scraped data tagging\n print(\"Entering\", request, site)\n # df = pd.read_csv(dbConfig.dict[\"requestUrl\"], encoding='utf-8')\n # db = pymongo.MongoClient().atlas\n # s = request.encode('utf-8')\n\n status = ATLAS_v2.main(request, site)\n print(\"Atlas main finish\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '15% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"20% complete\"\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now classifying content categories\")\n cc_list = ContentCategories.main(request)\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '35% 
complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"40% complete\"\n except:\n print(\"Error while classifying content categories!\")\n print(traceback.print_exc())\n\n # Calling analyses files - sentiment, trigger/driver and topic modelling\n try:\n print(\"Now tagging the dataset...\")\n tagop_list = NgramMapping.main(request, full_data_dict['tag_dict'])\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '50% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"40% complete\"\n except:\n print(\"Error while tagging dataset with dictionary\")\n print(traceback.print_exc())\n\n try:\n sent_list = SentimentAnalysis_2.senti_main(request)\n #print sent_list\n print(\"Sentiment data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '65% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"60% complete\"\n except:\n print(\"Error while analysing sentiment\")\n print(traceback.print_exc())\n\n\n try:\n td_list = TrigDriv_2.td_main(request)\n #print td_list\n print(\"TriggerDriver data inserted into DB\")\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = '80% complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"80% complete\"\n except:\n print(\"Error while analysing triggers/drivers\")\n print(traceback.print_exc())\n\n print \"Going to topic model\"\n #logging.info(\"going to topicmodeling.main\")\n #\n #Performing Topic Modeling Analysis\n num_topics = 8\n topic_status = TopicModeling.main(request, num_topics)\n\n # df = 
pd.read_csv(dbConfig.dict[\"requestUrl\"], encoding='utf-8')\n # if status == 200 & sent_list[1] == 200 & topic_status == 200:\n # # Update request csv status to completed\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Completed\"\n # else:\n # df.ix[(df.reqKw == request) & (df.reqStatus == 'Pending'), 'reqStatus'] = \"Failed\"\n try:\n req_obj = Requests.objects.get(reqKw=request)\n req_obj.reqStatus = 'Complete'\n req_obj.save()\n except:\n print(\"Couldn't save status update in DB!\")\n print(traceback.print_exc())\n # df.ix[(df.reqKw == request), 'reqStatus'] = \"Complete\"\n # with open(dbConfig.dict[\"requestUrl\"], 'w') as f:\n # df.to_csv(f, index=False)\n\n print(\"Exiting Return\")\n return request\n\n\npool = ProcessPoolExecutor()\n\n\ndef pool_exe(request, site, full_data_dict): # to Rev\n future = pool.submit(caller, request, site, full_data_dict)\n print (\"Exit pool exe\\n\")\n\n\n#def pool_exe_file(request,filecontents):\n# future = pool.submit(caller_file, request, filecontents)\n# print(\"Exit file pool exe\\n\")\n\n\ndef pool_exe_file(full_data_dict): # to Upl, Soc\n future = pool.submit(caller_file, full_data_dict)\n print(\"Exit file pool exe\\n\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def roles_required(roles):
    """Decorator factory restricting a view to callers whose ``role`` kwarg is in *roles*.

    A role outside *roles* aborts the request with HTTP 401.
    """

    def decorator(func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            print(roles, 'required')
            print(args, kwargs, 'provided')
            # NOTE(review): kwargs['role'] raises KeyError when the route does
            # not supply a role, and a falsy role ("" / None) skips the check
            # entirely (fail-open) -- confirm both are intended.
            if kwargs['role']:
                print(kwargs['role'])
                if kwargs['role'] not in roles:
                    print('unauthorised')
                    return abort(401)
                else:
                    print('authorised')
            return func(*args, **kwargs)
        return wrapper
    return decorator
<|reserved_special_token_1|>
from functools import wraps
from flask import request, abort
def roles_required(roles):
    """Decorator factory restricting a view to callers whose ``role`` kwarg is in *roles*.

    An unlisted role aborts with HTTP 401; a falsy role skips the check.
    """

    def decorator(func):

        @wraps(func)
        def wrapper(*args, **kwargs):
            print(roles, 'required')
            print(args, kwargs, 'provided')
            role = kwargs['role']
            if role:
                print(role)
                if role in roles:
                    print('authorised')
                else:
                    print('unauthorised')
                    return abort(401)
            return func(*args, **kwargs)
        return wrapper
    return decorator
<|reserved_special_token_1|>
from functools import wraps
from flask import request, abort
# Apply Aspect Oriented Programming to server routes using roles
# e.g. we want to specify the role, perhaps supplied
# by the request or a jwt token, using a decorator
# to abstract away the authorization
# possible decorator implementation
def roles_required(roles):
    """Decorator factory restricting a view to callers whose ``role`` kwarg is in *roles*.

    A role outside *roles* aborts the request with HTTP 401; a falsy or
    missing role is NOT rejected here (missing raises KeyError, falsy is
    allowed through) -- see the review note below.
    """
    def decorator(func):
        # can't skip this @wraps function
        # or error 'View function mapping is overwriting an existing endpoint function
        # stackoverflow.com/questions/19964079
        @wraps(func)
        def wrapper(*args, **kwargs):
            print(roles, 'required')
            print(args, kwargs, 'provided')
            # NOTE(review): kwargs['role'] raises KeyError when the route does
            # not pass a role, and a falsy role skips the check (fail-open) --
            # confirm both behaviours are intended.
            if (kwargs['role']):
                print(kwargs['role'])
                if (kwargs['role'] not in roles):
                    print('unauthorised')
                    return abort(401)
                else:
                    print('authorised')
            return func(*args, **kwargs)
            #return abort(401)
            #func()
        return wrapper
    return decorator
# can in theory use jwt token parsing to check role here
|
flexible
|
{
"blob_id": "1adaca88cf41d4e4d3a55996022278102887be07",
"index": 3707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef roles_required(roles):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(roles, 'required')\n print(args, kwargs, 'provided')\n if kwargs['role']:\n print(kwargs['role'])\n if kwargs['role'] not in roles:\n print('unauthorised')\n return abort(401)\n else:\n print('authorised')\n return func(*args, **kwargs)\n return wrapper\n return decorator\n",
"step-3": "from functools import wraps\nfrom flask import request, abort\n\n\ndef roles_required(roles):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(roles, 'required')\n print(args, kwargs, 'provided')\n if kwargs['role']:\n print(kwargs['role'])\n if kwargs['role'] not in roles:\n print('unauthorised')\n return abort(401)\n else:\n print('authorised')\n return func(*args, **kwargs)\n return wrapper\n return decorator\n",
"step-4": "from functools import wraps\nfrom flask import request, abort\n# Apply Aspect Oriented Programming to server routes using roles\n\n# e.g. we want to specify the role, perhaps supplied\n# by the request or a jwt token, using a decorator\n# to abstract away the authorization\n\n\n# possible decorator implementation\ndef roles_required(roles):\n def decorator(func):\n # can't skip this @wraps function \n # or error 'View function mapping is overwriting an existing endpoint function\n # stackoverflow.com/questions/19964079\n @wraps(func)\n def wrapper(*args, **kwargs):\n print(roles, 'required')\n print(args, kwargs, 'provided')\n if (kwargs['role']):\n print(kwargs['role'])\n if (kwargs['role'] not in roles):\n print('unauthorised')\n return abort(401)\n else:\n print('authorised')\n return func(*args, **kwargs)\n #return abort(401)\n #func()\n return wrapper\n return decorator\n \n\n# can in theory use jwt token parsing to check role here\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def perm(n, inc):
    """Return the arithmetic triple (n, n+inc, n+2*inc) if every member is
    prime and a digit-permutation of n; otherwise return None.

    Relies on the module-level ``prime_set`` of precomputed primes.
    """
    # All integers formed by permuting n's decimal digits.
    perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))
    perms = n, n + inc, n + inc * 2
    if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):
        return None
    else:
        return perms
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def perm(n, inc):
    """Return (n, n+inc, n+2*inc) when all three are prime digit-permutations
    of n (membership checked against the module-level ``prime_set``); else None.
    """
    digit_perms = {int(''.join(p)) for p in permutations(str(n))}
    candidates = (n, n + inc, n + 2 * inc)
    if all(c in prime_set and c in digit_perms for c in candidates):
        return candidates
    return None
<|reserved_special_token_0|>
print(primes)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# All primes below 10000, held in a set for O(1) membership tests in perm().
prime_set = set(prime_sieve(10000))
def perm(n, inc):
    """Return the arithmetic triple (n, n+inc, n+2*inc) if every member is
    prime and a digit-permutation of n; otherwise return None.

    Relies on the module-level ``prime_set`` of precomputed primes.
    """
    # All integers formed by permuting n's decimal digits.
    perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))
    perms = n, n + inc, n + inc * 2
    if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):
        return None
    else:
        return perms
# Candidate first terms: 4-digit primes (sieve bound 3333 keeps n+6660 4-digit).
primes = dropwhile(lambda x: x < 1000, prime_sieve(3333))
# Keep only starts whose step-3330 triple passes perm(); drop the Nones.
primes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))
# Concatenate each surviving triple into a single 12-digit integer.
primes = list(map(lambda x: x[0] * 10 ** 8 + x[1] * 10 ** 4 + x[2], primes))
print(primes)
<|reserved_special_token_1|>
from itertools import permutations, dropwhile
from pe_utils import prime_sieve
# All primes below 10000, held in a set for O(1) membership tests in perm().
prime_set = set(prime_sieve(10000))
def perm(n, inc):
    """Arithmetic-progression prime-permutation check.

    Returns (n, n+inc, n+2*inc) when every member is prime (per the
    module-level ``prime_set``) and a digit-permutation of n, else None.
    """
    allowed = set(map(lambda s: int(''.join(s)), permutations(str(n))))
    triple = tuple(n + inc * k for k in range(3))
    for value in triple:
        if value not in prime_set or value not in allowed:
            return None
    return triple
# Candidate first terms: 4-digit primes (sieve bound 3333 keeps n+6660 4-digit).
primes = dropwhile(lambda x: x < 1000, prime_sieve(3333))
# Keep only starts whose step-3330 triple passes perm(); drop the Nones.
primes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))
# Concatenate each surviving triple into a single 12-digit integer.
primes = list(map(lambda x: x[0] * 10 ** 8 + x[1] * 10 ** 4 + x[2], primes))
print(primes)
<|reserved_special_token_1|>
# find the 12-digit number formed by concatenating a series of 3 4-digit
# numbers who are permutations of each other and are all prime
from itertools import permutations, dropwhile
from pe_utils import prime_sieve
# All primes below 10000, held in a set for O(1) membership tests in perm().
prime_set = set(prime_sieve(10000))
def perm(n, inc):
    """Return the arithmetic triple (n, n+inc, n+2*inc) if every member is
    prime and a digit-permutation of n; otherwise return None.

    Relies on the module-level ``prime_set`` of precomputed primes.
    """
    # All integers formed by permuting n's decimal digits.
    perm_set = set(map(lambda x: int("".join(x)), permutations(str(n))))
    perms = (n, n + inc, n + inc*2)
    if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):
        return None
    else:
        return perms
# Candidate first terms: 4-digit primes (sieve bound 3333 keeps n+6660 4-digit).
primes = dropwhile(lambda x: x < 1000, prime_sieve(3333))
# Keep only starts whose step-3330 triple passes perm(); drop the Nones.
primes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))
# Concatenate each surviving triple into a single 12-digit integer.
primes = list(map(lambda x: x[0] * 10**8 + x[1] * 10**4 + x[2], primes))
print(primes)
|
flexible
|
{
"blob_id": "e03290746d6520fde63836e917f6af0c76596704",
"index": 3816,
"step-1": "<mask token>\n\n\ndef perm(n, inc):\n perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))\n perms = n, n + inc, n + inc * 2\n if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):\n return None\n else:\n return perms\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef perm(n, inc):\n perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))\n perms = n, n + inc, n + inc * 2\n if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):\n return None\n else:\n return perms\n\n\n<mask token>\nprint(primes)\n",
"step-3": "<mask token>\nprime_set = set(prime_sieve(10000))\n\n\ndef perm(n, inc):\n perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))\n perms = n, n + inc, n + inc * 2\n if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):\n return None\n else:\n return perms\n\n\nprimes = dropwhile(lambda x: x < 1000, prime_sieve(3333))\nprimes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))\nprimes = list(map(lambda x: x[0] * 10 ** 8 + x[1] * 10 ** 4 + x[2], primes))\nprint(primes)\n",
"step-4": "from itertools import permutations, dropwhile\nfrom pe_utils import prime_sieve\nprime_set = set(prime_sieve(10000))\n\n\ndef perm(n, inc):\n perm_set = set(map(lambda x: int(''.join(x)), permutations(str(n))))\n perms = n, n + inc, n + inc * 2\n if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):\n return None\n else:\n return perms\n\n\nprimes = dropwhile(lambda x: x < 1000, prime_sieve(3333))\nprimes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))\nprimes = list(map(lambda x: x[0] * 10 ** 8 + x[1] * 10 ** 4 + x[2], primes))\nprint(primes)\n",
"step-5": "# find the 12-digit number formed by concatenating a series of 3 4-digit\n# numbers who are permutations of each other and are all prime\n\nfrom itertools import permutations, dropwhile\nfrom pe_utils import prime_sieve\n\nprime_set = set(prime_sieve(10000))\n\ndef perm(n, inc):\n perm_set = set(map(lambda x: int(\"\".join(x)), permutations(str(n))))\n perms = (n, n + inc, n + inc*2)\n if any(map(lambda x: x not in prime_set or x not in perm_set, perms)):\n return None\n else:\n return perms\n\n\nprimes = dropwhile(lambda x: x < 1000, prime_sieve(3333))\nprimes = filter(lambda x: x != None, map(lambda x: perm(x, 3330), primes))\nprimes = list(map(lambda x: x[0] * 10**8 + x[1] * 10**4 + x[2], primes))\nprint(primes)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
import cryptography.hazmat.primitives.ciphers as ciphers
import struct
import secrets
import random
from typing import List
LOCO_PUBLICKEY = serialization.load_pem_public_key(b"""
-----BEGIN PUBLIC KEY-----
MIIBIDANBgkqhkiG9w0BAQEFAA
OCAQ0AMIIBCAKCAQEApElgRBx+
g7sniYFW7LE8ivrwXShKTRFV8l
XNItMXbN5QSC8vJ/cTSOTS619X
v5Zx7xXJIk4EKxtWesEGbgZpEU
P2xQ+IeH9oz0JxayEMvvD1nVNA
WgpWE4pociEoArsK7qY3YwXb1C
iDHo9hojLv7djbo3cwXvlyMh4T
UrX2RjCZPlVJxk/LVjzcl9ohJL
kl3eoSrf0AE4kQ9mk3+raEhq5D
v+IDxKYX+fIytUWKmrQJusjtre
9oVUX5sBOYZ0dzez/XapusEhUW
ImmB6mciVXfRXQ8IK4IH6vfNyx
MSOTfLEhRYN2SMLzplAYFiMV53
6tLS3VmG5GJRdkpDubqPeQIBAw==
-----END PUBLIC KEY-----"""
)
class V2SLClient:
    """
    V2SL Socket Client

    Client side of the LOCO V2SL transport: a one-time handshake carries a
    128-bit AES session key encrypted with the server's RSA public key
    (OAEP/SHA-1); every subsequent packet is AES-CFB ciphertext framed as
    ``<uint32 length><16-byte IV><ciphertext>`` (little-endian length).
    """
    def __init__(self):
        # 128-bit session key, delivered to the server inside the handshake.
        self._aeskey = secrets.randbits(128).to_bytes(16, "little")
        # Raw wire bytes accumulate here until a whole frame can be parsed.
        self._readbuf = bytearray()
        self._handshaked = False

    def handshake(self):
        """Return the handshake packet transporting the RSA-encrypted AES key."""
        encrypted_key = LOCO_PUBLICKEY.encrypt(
            self._aeskey,
            padding.OAEP(
                padding.MGF1(hashes.SHA1()),
                hashes.SHA1(), None
            )
        )
        # Header fields: key length, then constants 12 and 2 -- presumably
        # protocol version / cipher mode; confirm against the LOCO protocol.
        handshake_pkt = struct.pack("<III", len(encrypted_key), 12, 2) + encrypted_key
        return handshake_pkt

    def _send(self, data: bytes) -> bytes:
        """Encrypt one payload chunk; return the length + IV + ciphertext frame."""
        # Use a CSPRNG for the per-packet IV: random.randbytes() is Mersenne
        # Twister output (predictable) and only exists on Python >= 3.9.
        iv = secrets.token_bytes(16)
        self._aes = ciphers.Cipher(
            ciphers.algorithms.AES(self._aeskey),
            ciphers.modes.CFB(iv)
        )
        enc = self._aes.encryptor()
        enc_data = enc.update(data) + enc.finalize()
        enc_pkt = struct.pack("<I", len(enc_data)+16) + iv + enc_data
        return enc_pkt

    def _recv(self) -> bytes:
        """Parse one frame out of the read buffer; return None if incomplete."""
        if len(self._readbuf) < 4:
            return None
        enc_len, = struct.unpack("<I", self._readbuf[:4])
        if len(self._readbuf[4:]) < enc_len:
            return None
        # NOTE(review): decryption reuses the cipher built by the last _send()
        # rather than the IV transmitted in this frame; because CFB feeds back
        # whole ciphertext blocks, the stream resynchronises after the first
        # 16 bytes, which are discarded below -- confirm against the server's
        # framing. Calling this before any _send() raises AttributeError.
        dec = self._aes.decryptor()
        data = dec.update(self._readbuf[4:4+enc_len]) + dec.finalize()
        del self._readbuf[:4+enc_len]
        # The first 16 decrypted bytes occupy the transmitted-IV slot; drop them.
        return data[16:]

    def send(self, data: bytes, split=2048) -> List[bytes]:
        """Split *data* into <= *split*-byte chunks and return encrypted frames.

        The first call also prepends the handshake packet.
        """
        segments = []
        if not self._handshaked:
            self._handshaked = True
            segments.append(self.handshake())
        sentbytes = 0
        while sentbytes < len(data):
            segments.append(self._send(data[sentbytes:sentbytes+split]))
            sentbytes += split
        return segments

    def recv(self, data) -> List[bytes]:
        """Feed wire bytes in; return every complete decrypted payload so far.

        NOTE(review): a zero-length decrypted payload (b"") is falsy and would
        stop the parse loop early -- confirm empty frames cannot occur.
        """
        segments = []
        self._readbuf += data
        while (segment := self._recv()):
            segments.append(segment)
        return segments
|
normal
|
{
"blob_id": "db9919ab15988828d24b4430a124841f225860cc",
"index": 5764,
"step-1": "<mask token>\n\n\nclass V2SLClient:\n <mask token>\n <mask token>\n\n def handshake(self):\n encrypted_key = LOCO_PUBLICKEY.encrypt(self._aeskey, padding.OAEP(\n padding.MGF1(hashes.SHA1()), hashes.SHA1(), None))\n handshake_pkt = struct.pack('<III', len(encrypted_key), 12, 2\n ) + encrypted_key\n return handshake_pkt\n\n def _send(self, data: bytes) ->bytes:\n iv = random.randbytes(16)\n self._aes = ciphers.Cipher(ciphers.algorithms.AES(self._aeskey),\n ciphers.modes.CFB(iv))\n enc = self._aes.encryptor()\n enc_data = enc.update(data) + enc.finalize()\n enc_pkt = struct.pack('<I', len(enc_data) + 16) + iv + enc_data\n return enc_pkt\n <mask token>\n\n def send(self, data: bytes, split=2048) ->List[bytes]:\n segments = []\n if not self._handshaked:\n self._handshaked = True\n segments.append(self.handshake())\n sentbytes = 0\n while sentbytes < len(data):\n segments.append(self._send(data[sentbytes:sentbytes + split]))\n sentbytes += split\n return segments\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass V2SLClient:\n <mask token>\n <mask token>\n\n def handshake(self):\n encrypted_key = LOCO_PUBLICKEY.encrypt(self._aeskey, padding.OAEP(\n padding.MGF1(hashes.SHA1()), hashes.SHA1(), None))\n handshake_pkt = struct.pack('<III', len(encrypted_key), 12, 2\n ) + encrypted_key\n return handshake_pkt\n\n def _send(self, data: bytes) ->bytes:\n iv = random.randbytes(16)\n self._aes = ciphers.Cipher(ciphers.algorithms.AES(self._aeskey),\n ciphers.modes.CFB(iv))\n enc = self._aes.encryptor()\n enc_data = enc.update(data) + enc.finalize()\n enc_pkt = struct.pack('<I', len(enc_data) + 16) + iv + enc_data\n return enc_pkt\n\n def _recv(self) ->bytes:\n if len(self._readbuf) < 4:\n return None\n enc_len, = struct.unpack('<I', self._readbuf[:4])\n if len(self._readbuf[4:]) < enc_len:\n return None\n dec = self._aes.decryptor()\n data = dec.update(self._readbuf[4:4 + enc_len]) + dec.finalize()\n del self._readbuf[:4 + enc_len]\n iv = data[:16]\n return data[16:]\n\n def send(self, data: bytes, split=2048) ->List[bytes]:\n segments = []\n if not self._handshaked:\n self._handshaked = True\n segments.append(self.handshake())\n sentbytes = 0\n while sentbytes < len(data):\n segments.append(self._send(data[sentbytes:sentbytes + split]))\n sentbytes += split\n return segments\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass V2SLClient:\n <mask token>\n <mask token>\n\n def handshake(self):\n encrypted_key = LOCO_PUBLICKEY.encrypt(self._aeskey, padding.OAEP(\n padding.MGF1(hashes.SHA1()), hashes.SHA1(), None))\n handshake_pkt = struct.pack('<III', len(encrypted_key), 12, 2\n ) + encrypted_key\n return handshake_pkt\n\n def _send(self, data: bytes) ->bytes:\n iv = random.randbytes(16)\n self._aes = ciphers.Cipher(ciphers.algorithms.AES(self._aeskey),\n ciphers.modes.CFB(iv))\n enc = self._aes.encryptor()\n enc_data = enc.update(data) + enc.finalize()\n enc_pkt = struct.pack('<I', len(enc_data) + 16) + iv + enc_data\n return enc_pkt\n\n def _recv(self) ->bytes:\n if len(self._readbuf) < 4:\n return None\n enc_len, = struct.unpack('<I', self._readbuf[:4])\n if len(self._readbuf[4:]) < enc_len:\n return None\n dec = self._aes.decryptor()\n data = dec.update(self._readbuf[4:4 + enc_len]) + dec.finalize()\n del self._readbuf[:4 + enc_len]\n iv = data[:16]\n return data[16:]\n\n def send(self, data: bytes, split=2048) ->List[bytes]:\n segments = []\n if not self._handshaked:\n self._handshaked = True\n segments.append(self.handshake())\n sentbytes = 0\n while sentbytes < len(data):\n segments.append(self._send(data[sentbytes:sentbytes + split]))\n sentbytes += split\n return segments\n\n def recv(self, data) ->List[bytes]:\n segments = []\n self._readbuf += data\n while (segment := self._recv()):\n segments.append(segment)\n return segments\n",
"step-4": "<mask token>\n\n\nclass V2SLClient:\n \"\"\"\n V2SL Socket Client\n \"\"\"\n\n def __init__(self):\n self._aeskey = secrets.randbits(128).to_bytes(16, 'little')\n self._readbuf = bytearray()\n self._handshaked = False\n\n def handshake(self):\n encrypted_key = LOCO_PUBLICKEY.encrypt(self._aeskey, padding.OAEP(\n padding.MGF1(hashes.SHA1()), hashes.SHA1(), None))\n handshake_pkt = struct.pack('<III', len(encrypted_key), 12, 2\n ) + encrypted_key\n return handshake_pkt\n\n def _send(self, data: bytes) ->bytes:\n iv = random.randbytes(16)\n self._aes = ciphers.Cipher(ciphers.algorithms.AES(self._aeskey),\n ciphers.modes.CFB(iv))\n enc = self._aes.encryptor()\n enc_data = enc.update(data) + enc.finalize()\n enc_pkt = struct.pack('<I', len(enc_data) + 16) + iv + enc_data\n return enc_pkt\n\n def _recv(self) ->bytes:\n if len(self._readbuf) < 4:\n return None\n enc_len, = struct.unpack('<I', self._readbuf[:4])\n if len(self._readbuf[4:]) < enc_len:\n return None\n dec = self._aes.decryptor()\n data = dec.update(self._readbuf[4:4 + enc_len]) + dec.finalize()\n del self._readbuf[:4 + enc_len]\n iv = data[:16]\n return data[16:]\n\n def send(self, data: bytes, split=2048) ->List[bytes]:\n segments = []\n if not self._handshaked:\n self._handshaked = True\n segments.append(self.handshake())\n sentbytes = 0\n while sentbytes < len(data):\n segments.append(self._send(data[sentbytes:sentbytes + split]))\n sentbytes += split\n return segments\n\n def recv(self, data) ->List[bytes]:\n segments = []\n self._readbuf += data\n while (segment := self._recv()):\n segments.append(segment)\n return segments\n",
"step-5": "from cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nimport cryptography.hazmat.primitives.ciphers as ciphers\nimport struct\nimport secrets\nimport random\n\nfrom typing import List\n\nLOCO_PUBLICKEY = serialization.load_pem_public_key(b\"\"\"\n-----BEGIN PUBLIC KEY-----\nMIIBIDANBgkqhkiG9w0BAQEFAA\nOCAQ0AMIIBCAKCAQEApElgRBx+\ng7sniYFW7LE8ivrwXShKTRFV8l\nXNItMXbN5QSC8vJ/cTSOTS619X\nv5Zx7xXJIk4EKxtWesEGbgZpEU\nP2xQ+IeH9oz0JxayEMvvD1nVNA\nWgpWE4pociEoArsK7qY3YwXb1C\niDHo9hojLv7djbo3cwXvlyMh4T\nUrX2RjCZPlVJxk/LVjzcl9ohJL\nkl3eoSrf0AE4kQ9mk3+raEhq5D\nv+IDxKYX+fIytUWKmrQJusjtre\n9oVUX5sBOYZ0dzez/XapusEhUW\nImmB6mciVXfRXQ8IK4IH6vfNyx\nMSOTfLEhRYN2SMLzplAYFiMV53\n6tLS3VmG5GJRdkpDubqPeQIBAw==\n-----END PUBLIC KEY-----\"\"\"\n)\n\nclass V2SLClient:\n \"\"\"\n V2SL Socket Client\n \"\"\"\n def __init__(self):\n self._aeskey = secrets.randbits(128).to_bytes(16, \"little\")\n self._readbuf = bytearray()\n self._handshaked = False\n \n def handshake(self):\n encrypted_key = LOCO_PUBLICKEY.encrypt(\n self._aeskey,\n padding.OAEP(\n padding.MGF1(hashes.SHA1()),\n hashes.SHA1(), None\n )\n )\n handshake_pkt = struct.pack(\"<III\", len(encrypted_key), 12, 2) + encrypted_key\n return handshake_pkt\n \n def _send(self, data: bytes) -> bytes:\n iv = random.randbytes(16)\n self._aes = ciphers.Cipher(\n ciphers.algorithms.AES(self._aeskey),\n ciphers.modes.CFB(iv)\n )\n enc = self._aes.encryptor()\n enc_data = enc.update(data) + enc.finalize()\n enc_pkt = struct.pack(\"<I\", len(enc_data)+16) + iv + enc_data\n return enc_pkt\n \n def _recv(self) -> bytes:\n if len(self._readbuf) < 4:\n return None\n enc_len, = struct.unpack(\"<I\", self._readbuf[:4])\n if len(self._readbuf[4:]) < enc_len:\n return None\n dec = self._aes.decryptor()\n data = dec.update(self._readbuf[4:4+enc_len]) + dec.finalize()\n del self._readbuf[:4+enc_len]\n iv = data[:16]\n return 
data[16:]\n\n def send(self, data: bytes, split=2048) -> List[bytes]:\n segments = []\n if not self._handshaked:\n self._handshaked = True\n segments.append(self.handshake())\n sentbytes = 0\n while sentbytes < len(data):\n segments.append(self._send(data[sentbytes:sentbytes+split]))\n sentbytes += split\n return segments\n\n def recv(self, data) -> List[bytes]:\n segments = []\n self._readbuf += data\n while (segment := self._recv()):\n segments.append(segment)\n return segments\n",
"step-ids": [
4,
5,
6,
8,
11
]
}
|
[
4,
5,
6,
8,
11
] |
import sys
sys.path.append('preprocess')
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
import numpy as np
import refit_cfg
import os
import random
from sklearn.model_selection import train_test_split
# Target appliances handled by this preprocessing script.
name = ['WashingMachine', 'Kettle', 'Microwave', 'Fridge', 'Dishwasher']
# Maps appliance name -> {house_id: channel_id} lookup tables taken from
# preprocess/refit_cfg (one table per appliance).
appliance_dict = {
    'WashingMachine': refit_cfg.washingmachine,
    'Kettle': refit_cfg.kettle,
    'Microwave': refit_cfg.microwave,
    'Fridge': refit_cfg.fridge,
    'Dishwasher': refit_cfg.dishwasher
}
def align_process(house_id):
    """Resample one house's raw REFIT recording onto a fixed 8-second grid.

    For each grid timestamp the nearest raw sample is copied (with its
    timestamp overwritten by the grid time). A gap between consecutive raw
    samples larger than refit_cfg.separation_threshold is treated as a
    recording break: the grid restarts at the next raw sample instead of
    interpolating across the hole. Result is saved to
    after_align/<house_id>.npy.

    :param house_id: REFIT house number; selects original_data/<id>.npy.
    :return: None (writes the aligned array to disk).
    """
    data = np.load('data\\REFIT\\original_data\\%d.npy' % house_id)
    new_data = []
    current_index = 0
    current_time = int(data[0][0])  # grid starts at the first raw timestamp
    end_time = int(data[-1][0]) + 8
    interval_threshold = refit_cfg.separation_threshold
    isend = 0  # set once the final raw sample has been consumed
    data_length = len(data)
    while current_time <= end_time:
        current_interval = int(data[current_index+1][0]) - int(data[current_index][0])
        if current_interval < interval_threshold: # small interval
            if current_time > int(data[current_index][0]):
                # Grid time has moved past the current raw sample: scan
                # forward to the first sample at/after current_time.
                temp_index = current_index + 1
                while current_time > int(data[temp_index][0]):
                    temp_index += 1
                    if temp_index > (data_length-1):
                        temp_index -= 1
                        break
                # Keep whichever neighbour is closer to the grid time.
                if abs(current_time - int(data[temp_index-1][0])) > abs(int(data[temp_index][0])-current_time):
                    current_index = temp_index
                    if temp_index == (data_length-1):
                        print('The end!')
                        isend = 1
                else:
                    current_index = temp_index - 1
            # Copy the chosen sample, stamping it with the grid timestamp.
            t = []
            for element in data[current_index]:
                t.append(element)
            t[0] = current_time
            new_data.append(t)
            if isend == 1:
                break
            current_time += 8  # nominal REFIT sampling period
            if current_index % 1000 == 0:
                print('House %d processing: %f' % (house_id, current_index/data_length))
        else: # big interval
            # Recording gap: restart the grid at the next raw sample.
            current_index += 1
            current_time = int(data[current_index][0])
    np.save('data\\REFIT\\after_align\\%d.npy' % house_id, new_data)
def visual(house_id, channel_id, start, length):
    """Plot a slice of one channel from an aligned house recording."""
    data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
    print(len(data))
    col = channel_id + 1  # column 0 holds the timestamp
    series = [int(row[col]) for row in data]
    plt.plot(series[start:start + length])
    plt.show()
def diff(house_id):
    """Plot successive-timestamp differences for an aligned recording."""
    data = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
    deltas = [int(nxt[0]) - int(cur[0]) for cur, nxt in zip(data, data[1:])]
    plt.plot(deltas)
    plt.show()
    plt.close()
def appliance_separation(dict, appliance_name):
    """Extract [mains, appliance] record pairs per house.

    Writes one <house_id>_<channel_id>.npy file per house under
    appliance_data/<appliance_name>.
    NOTE: this definition is shadowed by a later one in this module.
    """
    out_dir = 'data\\REFIT\\appliance_data\\%s' % appliance_name
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for house_id, channel_id in dict.items():
        aligned = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
        records = [[row[1], row[channel_id + 1]] for row in aligned]
        np.save(os.path.join(out_dir, '%d_%d.npy' % (house_id, channel_id)), records)
        print('Appliance %s House %d complete!' % (appliance_name, house_id))
def show_appliance(house_id, appliance_name):
    """Plot mains vs. appliance power for one aligned recording.

    NOTE: this definition is shadowed by a later one in this module.
    """
    channel_id = appliance_dict[appliance_name][house_id]
    data = np.load('data\\REFIT\\after_align\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
    print(len(data))
    mains = [int(pair[0]) for pair in data]
    app = [int(pair[1]) for pair in data]
    plt.figure(figsize=(20, 8))
    plt.plot(mains)
    plt.plot(app)
    plt.show()
def cull(cull_dict):
    """Remove annotated bad index ranges from each appliance recording.

    :param cull_dict: appliance name -> {house_id: [[start, end], ...]}
        where each [start, end] pair marks an index range to drop.
    NOTE: this definition is shadowed by a later one in this module.
    """
    for appliance_name, house_map in cull_dict.items():
        path = 'data\\REFIT\\after_culling\\%s' % appliance_name
        if not os.path.exists(path):
            os.mkdir(path)
        for house_id, cull_list in house_map.items():
            channel_id = appliance_dict[appliance_name][house_id]
            data = np.load('data\\REFIT\\after_align\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
            # Invert the drop list into the complementary ranges to keep.
            keep = [[0, cull_list[0][0]]]
            for k in range(len(cull_list) - 1):
                keep.append([cull_list[k][1], cull_list[k + 1][0]])
            keep.append([cull_list[-1][1], len(data) - 1])
            new_data = []
            for lo, hi in keep:
                if hi - lo != 0:
                    new_data.extend(data[j] for j in range(lo, hi))
            np.save('data\\REFIT\\after_culling\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)
            print('House %d %s complete!' % (house_id, appliance_name))
def appliance_separation(dict, appliance_name):
    """Split per-house aligned data into [mains, appliance] records.

    Writes one file per house, named <house_id>_<channel_id>.npy, into the
    appliance_data/<appliance_name> folder (created on demand).

    :param dict: mapping of house_id -> channel_id for this appliance.
    :param appliance_name: appliance name, used for the output folder.
    :return: None
    """
    out_dir = 'data\\REFIT\\appliance_data\\%s' % appliance_name
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for house_id, channel_id in dict.items():
        aligned = np.load('data\\REFIT\\after_align\\%d.npy' % house_id)
        # Each record pairs the mains reading with this appliance's channel.
        records = [[row[1], row[channel_id + 1]] for row in aligned]
        np.save(os.path.join(out_dir, '%d_%d.npy' % (house_id, channel_id)), records)
        print('Appliance %s House %d complete!' % (appliance_name, house_id))
def show_appliance(house_id, appliance_name):
    """Plot mains vs. appliance power for one culled recording.

    Used to eyeball long stretches of missing or broken data so that a
    cull_dict can be assembled for cull().

    :param house_id: REFIT house number.
    :param appliance_name: key into appliance_dict.
    :return: None
    """
    channel_id = appliance_dict[appliance_name][house_id]
    data = np.load('data\\REFIT\\after_culling\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
    print(len(data))
    mains = [int(pair[0]) for pair in data]
    app = [int(pair[1]) for pair in data]
    plt.figure(figsize=(20, 8))
    plt.plot(mains)
    plt.plot(app)
    plt.show()
def cull(cull_dict):
    """Drop the annotated bad segments from each appliance recording.

    Based on the plotted figures, large gaps/bad stretches are deleted;
    the result should be re-inspected afterwards.

    NOTE(review): this version loads from after_culling_2 -- the same
    directory it writes back to -- while the earlier cull() reads the
    pre-cull data. Confirm the intended input directory before running
    this on a fresh tree (the input file may not exist yet).

    :param cull_dict: appliance name -> {house_id: [[start, end], ...]}
        index ranges to delete.
    :return: None; overwrites after_culling_2/<appliance>/<house>_<channel>.npy.
    """
    for appliance_name, _dict in cull_dict.items():
        path = 'data\\REFIT\\after_culling_2\\%s' % appliance_name
        if not os.path.exists(path):
            os.mkdir(path)
        for house_id, cull_list in _dict.items():
            channel_id = appliance_dict[appliance_name][house_id]
            data = np.load('data\\REFIT\\after_culling_2\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id))
            new_data = []
            # Invert cull_list into the complementary ranges of valid data.
            _cull_list = [[0, cull_list[0][0]]]
            for i in range(len(cull_list)-1):
                _cull_list.append([cull_list[i][1], cull_list[i+1][0]])
            _cull_list.append([cull_list[-1][1], (len(data)-1)])
            for i in _cull_list:
                if i[1] - i[0] != 0:
                    for j in range(i[0], i[1]):
                        new_data.append(data[j])
            np.save('data\\REFIT\\after_culling_2\\%s\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)
            print('House %d %s complete!' % (house_id, appliance_name))
def separate(appliance_name):
    """Slice each culled recording into fixed-width training/test clips.

    For every house in refit_cfg.train_validation / refit_cfg.test, clips of
    refit_cfg.window_width samples are taken with a random stride (sliding
    pass), plus refit_cfg.random_clip extra clips at random offsets. Results
    are saved per house under <data_path>/1024/.

    Fix vs. original: the two near-identical per-house extraction passes are
    factored into _clip_house, and the "Test 2" progress print now reports
    i/random_clip (the original divided by data_length, giving a meaningless
    fraction; the 'T & V 2' pass already used random_clip).

    :param appliance_name: appliance whose config entries and folder to use.
    :return: None (writes per-house .npy clip files).
    """
    window_width = refit_cfg.window_width[appliance_name]
    random_clip = refit_cfg.random_clip[appliance_name]
    data_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    count = 0
    for house_id, channel_id in refit_cfg.train_validation[appliance_name].items():
        data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))
        main_clips, appliance_clips, count = _clip_house(
            data, window_width, random_clip, house_id, 'T & V', count)
        print('Train & Validation: House %d %s complete!' % (house_id, appliance_name))
        np.save(os.path.join(data_path, '1024\\appliance_train_validation_%d.npy' % house_id), appliance_clips)
        np.save(os.path.join(data_path, '1024\\main_train_validation_%d.npy' % house_id), main_clips)
    count = 0
    for house_id, channel_id in refit_cfg.test[appliance_name].items():
        data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))
        main_clips, appliance_clips, count = _clip_house(
            data, window_width, random_clip, house_id, 'Test', count)
        print('Test 2: House %d %s complete!' % (house_id, appliance_name))
        np.save(os.path.join(data_path, '1024\\appliance_test_%d.npy' % house_id), appliance_clips)
        np.save(os.path.join(data_path, '1024\\main_test_%d.npy' % house_id), main_clips)


def _clip_house(data, window_width, random_clip, house_id, tag, count):
    """Extract clips from one house's [mains, appliance] record array.

    Pass 1 slides a window of window_width with a random stride in
    [0, window_width); pass 2 adds random_clip windows at uniform random
    offsets. Returns (main_clips, appliance_clips, count) where count is
    the running clip counter used for throttled progress printing.
    """
    main_clips = []
    appliance_clips = []
    data_length = len(data)
    current_head = 0
    end = data_length - window_width - 1
    while current_head < end:
        temp_main = [data[i][0] for i in range(current_head, current_head + window_width)]
        temp_appliance = [data[i][1] for i in range(current_head, current_head + window_width)]
        # Random stride; may be 0, but a new draw happens each iteration.
        current_head += int(window_width * random.random())
        appliance_clips.append(temp_appliance)
        main_clips.append(temp_main)
        count += 1
        if count % 1000 == 0:
            print('%s 1: House %d %f' % (tag, house_id, current_head / data_length))
    # Shrink the valid start range so random windows stay in bounds.
    data_length -= window_width
    for i in range(random_clip):
        start = int(random.random() * data_length)
        temp_main = [data[j][0] for j in range(start, start + window_width)]
        temp_appliance = [data[j][1] for j in range(start, start + window_width)]
        appliance_clips.append(temp_appliance)
        main_clips.append(temp_main)
        count += 1
        if count % 1000 == 0:
            print('%s 2: House %d %f' % (tag, house_id, i / random_clip))
    return main_clips, appliance_clips, count
def clip_visual(appliance_name):
    """Render 1000 random (main, appliance) clip pairs to jpg for review."""
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train_.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train_.npy'))
    print('Data load complete!')
    # Appliance clips cover the centre 512 samples of the 1024 main window.
    x = np.linspace(256, 768, 512)
    n_clips = len(appliance_data)
    for i in range(1000):
        pick = int(random.random() * n_clips)
        plt.figure(figsize=(25, 10), dpi=100)
        plt.subplot(211)
        plt.xlim(0, 1024)
        plt.plot(main_data[pick])
        plt.subplot(212)
        plt.xlim(0, 1024)
        plt.plot(x, appliance_data[pick])
        savefig(os.path.join(base_path, 'clip_view\\%d.jpg' % i))
        plt.close()
def train_validation_split(appliance_name):
    """Randomly split the integrated clips 80/20 into train and validation."""
    data_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance = np.load(os.path.join(data_path, 'appliance_train_validation.npy'))
    main = np.load(os.path.join(data_path, 'main_train_validation.npy'))
    split = train_test_split(appliance, main, test_size=0.2)
    appliance_train, appliance_validation, main_train, main_validation = split
    print(len(appliance_train))
    print(len(main_train))
    np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)
    np.save(os.path.join(data_path, 'main_train.npy'), main_train)
    np.save(os.path.join(data_path, 'appliance_validation.npy'), appliance_validation)
    np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)
def data_integration(appliance_name):
    """Concatenate per-house clip files into single train/validation and test arrays."""
    data_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance = []
    main = []
    for house_id, channel_id in refit_cfg.train_validation[appliance_name].items():
        appliance.extend(np.load(os.path.join(data_path, 'appliance_train_validation_%d.npy' % house_id)))
        main.extend(np.load(os.path.join(data_path, 'main_train_validation_%d.npy' % house_id)))
    print(len(appliance))
    print(len(main))
    np.save(os.path.join(data_path, 'appliance_train_validation.npy'), appliance)
    np.save(os.path.join(data_path, 'main_train_validation.npy'), main)
    appliance_test = []
    main_test = []
    for house_id, channel_id in refit_cfg.test[appliance_name].items():
        appliance_test.extend(np.load(os.path.join(data_path, 'appliance_test_%d.npy' % house_id)))
        main_test.extend(np.load(os.path.join(data_path, 'main_test_%d.npy' % house_id)))
    print(len(appliance_test))
    print(len(main_test))
    np.save(os.path.join(data_path, 'appliance_test.npy'), appliance_test)
    np.save(os.path.join(data_path, 'main_test.npy'), main_test)
def positive_negative(appliance_name):
    """Report how many training clips exceed each activation threshold.

    For each threshold, counts clips whose summed appliance power is above
    it and prints the count and fraction. Purely diagnostic; writes nothing.

    Fix vs. original: each clip is summed exactly once instead of once per
    threshold (was accidentally O(T*N*W)); the unused initial `count = 0`
    is removed and the builtin-shadowing local `sum` is renamed. Printed
    output is unchanged.

    :param appliance_name: appliance folder to read appliance_train.npy from.
    :return: None
    """
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]
    d = {th: 0 for th in threshold}
    print(d)
    # Hoist the expensive per-clip summation out of the threshold loop.
    clip_sums = [sum(int(v) for v in clip) for clip in appliance_data]
    for th in threshold:
        for total in clip_sums:
            if total > th:
                d[th] += 1
        print('Thres %d complete!' % th)
    for thres, count in d.items():
        print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data), count/len(appliance_data)))
def clip_view(appliance_name, thres):
    """Save a jpg of every training clip whose total energy exceeds thres."""
    base_path = 'data\\REFIT\\after_culling\\%s' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    saved = 0
    for clip in appliance_data:
        total = sum(int(v) for v in clip)
        if total > thres:
            plt.figure(figsize=(25, 10), dpi=100)
            plt.plot(clip.astype(int))
            savefig(os.path.join(base_path, 'clip_view\\%d.jpg' % saved))
            plt.close()
            saved += 1
def test_process(appliance_name):
    """Zero out test clips whose peak is below 0.05 (treated as inactive)."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))
    zero_clip = [0.0] * 512
    new_app = []
    for clip in appliance_data:
        peak = np.max(clip)
        if peak < 0.05:
            print(peak)
            new_app.append(zero_clip)
        else:
            new_app.append(clip)
    np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)
def separate_positive_negative(appliance_name, thres, peak):
    """Split training clips into positive (active) and negative sets.

    A clip is positive when its total appliance energy exceeds thres AND
    its peak within the centre 512 samples reaches peak; otherwise the
    appliance target is replaced by an all-zero clip and the pair goes to
    the negative set. Four .npy files are written.
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train.npy'))
    appliance_positive, main_positive = [], []
    appliance_negative, main_negative = [], []
    zero_clip = [0] * 1024
    total_clips = len(appliance_data)
    for idx in range(total_clips):
        energy = sum(int(v) for v in appliance_data[idx])
        # Peak over the centre window; floored at 0 like the original scan.
        centre_peak = max(0, max(int(appliance_data[idx][j]) for j in range(256, 768)))
        if centre_peak < peak:
            energy = 0
        if energy > thres:
            appliance_positive.append(appliance_data[idx])
            main_positive.append(main_data[idx])
        else:
            appliance_negative.append(zero_clip)
            main_negative.append(main_data[idx])
        if idx % 1000 == 0:
            print('Processing: %f' % (idx / total_clips))
    np.save(os.path.join(base_path, 'appliance_positive.npy'), appliance_positive)
    np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)
    np.save(os.path.join(base_path, 'appliance_negative.npy'), appliance_negative)
    np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)
def generate_balanced_dataset(appliance_name, negative_ratio):
    """Mix negatives into the positives at the given ratio and shuffle.

    Appends negative_ratio * |positives| randomly chosen negative pairs to
    the positive set, shuffles the combined order, and saves the balanced
    training arrays.
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_positive = list(np.load(os.path.join(base_path, 'appliance_positive.npy')))
    appliance_negative = np.load(os.path.join(base_path, 'appliance_negative.npy'))
    main_positive = list(np.load(os.path.join(base_path, 'main_positive.npy')))
    main_negative = np.load(os.path.join(base_path, 'main_negative.npy'))
    print('Data load complete!')
    positive_length = len(appliance_positive)
    negative_length = len(appliance_negative)
    print('Postive length: %d negative length: %d' % (positive_length, negative_length))
    for _ in range(int(positive_length * negative_ratio)):
        pick = int(random.random() * negative_length)
        appliance_positive.append(appliance_negative[pick])
        main_positive.append(main_negative[pick])
    print('Data generate complete! length: %d' % (len(appliance_positive)))
    order = np.linspace(0, len(appliance_positive) - 1, len(appliance_positive)).astype(int)
    random.shuffle(order)
    appliance_new = [appliance_positive[i] for i in order]
    main_new = [main_positive[i] for i in order]
    print('Data shuffle complete!')
    np.save(os.path.join(base_path, 'appliance_train_balanced.npy'), appliance_new)
    np.save(os.path.join(base_path, 'main_train_balanced.npy'), main_new)
    print('Data save complete!')
def shrink(appliance_name, scale):
    """Divide every balanced training sample by scale; save as *_<scale>.npy."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_train_balanced.npy'))
    main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))
    print('Data load complete!')
    appliance_new = [[int(v) / scale for v in clip] for clip in appliance_data]
    main_new = [[int(v) / scale for v in clip] for clip in main_data]
    print('Process complete!')
    np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)
def shrink_validation(appliance_name, scale):
    """Divide every validation sample by scale; save as *_<scale>.npy."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_validation.npy'))
    main_data = np.load(os.path.join(base_path, 'main_validation.npy'))
    print('Data load complete!')
    appliance_new = [[int(v) / scale for v in clip] for clip in appliance_data]
    main_new = [[int(v) / scale for v in clip] for clip in main_data]
    print('Process complete!')
    np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new)
def appliance_1024to512(appliance_name):
    """Crop the centre 512 samples out of each 1024-sample appliance clip."""
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name

    def centre_crop(clips):
        # Keep samples 256..767 of every clip, as floats.
        return [[float(clip[j]) for j in range(256, 768)] for clip in clips]

    appliance_train = np.load(os.path.join(base_path, 'appliance_train_1000.npy'))
    appliance_validation = np.load(os.path.join(base_path, 'appliance_validation_1000.npy'))
    appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy'))
    np.save(os.path.join(base_path, 'appliance_train_512.npy'), centre_crop(appliance_train))
    np.save(os.path.join(base_path, 'appliance_validation_512.npy'), centre_crop(appliance_validation))
    np.save(os.path.join(base_path, 'appliance_test_512.npy'), centre_crop(appliance_test))
def shrink_test(appliance_name, scale):
    """Scale down the test windows for *appliance_name*.

    Loads the appliance/main test arrays, truncates each reading to an int
    and divides it by ``scale`` (yielding floats), then saves the results
    next to the inputs.

    :param appliance_name: appliance folder name under after_culling
    :param scale: divisor applied to every power reading (e.g. 1000)
    """
    base_path = 'data\\REFIT\\after_culling\\%s\\1024' % appliance_name
    appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))
    main_data = np.load(os.path.join(base_path, 'main_test.npy'))
    print('Data load complete!')
    # int() first reproduces the original truncation before dividing.
    appliance_new = [[float(int(v) / scale) for v in row] for row in appliance_data]
    main_new = [[float(int(v) / scale) for v in row] for row in main_data]
    print('Process complete!')
    # BUGFIX: the output names previously hardcoded '_1000' and ignored
    # `scale`, unlike shrink()/shrink_validation(). Using the parameter is
    # consistent and identical for the scale=1000 call in __main__.
    np.save(os.path.join(base_path, 'appliance_test_%d.npy' % scale), appliance_new)
    np.save(os.path.join(base_path, 'main_test_%d.npy' % scale), main_new)
if __name__ == '__main__':
    # Full REFIT preprocessing pipeline for a single appliance: window
    # extraction, integration, splitting, balancing, scaling and cropping.
    target = 'WashingMachine'
    separate(target)
    data_integration(target)
    train_validation_split(target)
    # Positive/negative separation thresholds: energy sum > 1500, peak > 20.
    separate_positive_negative(target, 1500, 20)
    generate_balanced_dataset(target, 1)
    # Divide every reading by 1000 in all three splits.
    shrink(target, 1000)
    shrink_validation(target, 1000)
    shrink_test(target, 1000)
    # Crop the 1024-sample windows down to their central 512 samples.
    appliance_1024to512(target)
    # test_process(target)
    print('Process complete!!!')
|
normal
|
{
"blob_id": "30405a6f20a44b2252b6894ef6d0e818861702f8",
"index": 9857,
"step-1": "<mask token>\n\n\ndef align_process(house_id):\n data = np.load('data\\\\REFIT\\\\original_data\\\\%d.npy' % house_id)\n new_data = []\n current_index = 0\n current_time = int(data[0][0])\n end_time = int(data[-1][0]) + 8\n interval_threshold = refit_cfg.separation_threshold\n isend = 0\n data_length = len(data)\n while current_time <= end_time:\n current_interval = int(data[current_index + 1][0]) - int(data[\n current_index][0])\n if current_interval < interval_threshold:\n if current_time > int(data[current_index][0]):\n temp_index = current_index + 1\n while current_time > int(data[temp_index][0]):\n temp_index += 1\n if temp_index > data_length - 1:\n temp_index -= 1\n break\n if abs(current_time - int(data[temp_index - 1][0])) > abs(\n int(data[temp_index][0]) - current_time):\n current_index = temp_index\n if temp_index == data_length - 1:\n print('The end!')\n isend = 1\n else:\n current_index = temp_index - 1\n t = []\n for element in data[current_index]:\n t.append(element)\n t[0] = current_time\n new_data.append(t)\n if isend == 1:\n break\n current_time += 8\n if current_index % 1000 == 0:\n print('House %d processing: %f' % (house_id, current_index /\n data_length))\n else:\n current_index += 1\n current_time = int(data[current_index][0])\n np.save('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id, new_data)\n\n\n<mask token>\n\n\ndef show_appliance(house_id, appliance_name):\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in 
_dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\n<mask token>\n\n\ndef cull(cull_dict):\n \"\"\"\n 根据画的图,将大段的空缺段进行删除,删除之后,需要进行比对\n :param cull_dict:\n :return:\n \"\"\"\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling_2\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' %\n (appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' 
% (house_id, appliance_name))\n\n\ndef separate(appliance_name):\n window_width = refit_cfg.window_width[appliance_name]\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n count = 0\n appliance_train_validation = []\n appliance_test = []\n main_train_validation = []\n main_test = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_train_validation.clear()\n main_train_validation.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(window_width * r)\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n random_clip = refit_cfg.random_clip[appliance_name]\n for i in range(random_clip):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 2: House %d %f' % (house_id, i / random_clip))\n print('Train & Validation: House %d %s complete!' 
% (house_id,\n appliance_name))\n np.save(os.path.join(data_path, \n '1024\\\\appliance_train_validation_%d.npy' % house_id),\n appliance_train_validation)\n np.save(os.path.join(data_path, \n '1024\\\\main_train_validation_%d.npy' % house_id),\n main_train_validation)\n count = 0\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_test.clear()\n main_test.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(r * window_width)\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n for i in range(refit_cfg.random_clip[appliance_name]):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 2: House %d %f' % (house_id, i / data_length))\n print('Test 2: House %d %s complete!' 
% (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_test_%d.npy' %\n house_id), appliance_test)\n np.save(os.path.join(data_path, '1024\\\\main_test_%d.npy' % house_id\n ), main_test)\n\n\n<mask token>\n\n\ndef train_validation_split(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = np.load(os.path.join(data_path,\n 'appliance_train_validation.npy'))\n main = np.load(os.path.join(data_path, 'main_train_validation.npy'))\n appliance_train, appliance_validation, main_train, main_validation = (\n train_test_split(appliance, main, test_size=0.2))\n print(len(appliance_train))\n print(len(main_train))\n np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)\n np.save(os.path.join(data_path, 'main_train.npy'), main_train)\n np.save(os.path.join(data_path, 'appliance_validation.npy'),\n appliance_validation)\n np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)\n\n\n<mask token>\n\n\ndef positive_negative(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n d = {}\n for i in range(len(threshold)):\n d[threshold[i]] = 0\n print(d)\n for th in threshold:\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > th:\n d[th] += 1\n print('Thres %d complete!' 
% th)\n for thres, count in d.items():\n print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data),\n count / len(appliance_data)))\n\n\ndef clip_view(appliance_name, thres):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > thres:\n plt.figure(figsize=(25, 10), dpi=100)\n plt.plot(i.astype(int))\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % count))\n plt.close()\n count += 1\n\n\ndef test_process(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))\n temp = [0.0] * 512\n new_app = []\n for i in range(len(appliance_data)):\n max = np.max(appliance_data[i])\n if max < 0.05:\n print(max)\n new_app.append(temp)\n else:\n new_app.append(appliance_data[i])\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)\n\n\ndef separate_positive_negative(appliance_name, thres, peak):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train.npy'))\n count = 0\n appliance_positive = []\n appliance_negative = []\n main_positive = []\n main_negative = []\n appliance_temp = [0] * 1024\n for i in range(len(appliance_data)):\n sum = 0\n max = 0\n for j in appliance_data[i]:\n sum += int(j)\n for j in range(512):\n if int(appliance_data[i][j + 256]) > max:\n max = int(appliance_data[i][j + 256])\n if max < peak:\n sum = 0\n if sum > thres:\n appliance_positive.append(appliance_data[i])\n main_positive.append(main_data[i])\n else:\n appliance_negative.append(appliance_temp)\n main_negative.append(main_data[i])\n if i % 1000 == 0:\n print('Processing: %f' % (i / len(appliance_data)))\n 
np.save(os.path.join(base_path, 'appliance_positive.npy'),\n appliance_positive)\n np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)\n np.save(os.path.join(base_path, 'appliance_negative.npy'),\n appliance_negative)\n np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)\n\n\n<mask token>\n\n\ndef shrink(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_train_balanced.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)\n\n\ndef shrink_validation(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_validation.npy'))\n main_data = np.load(os.path.join(base_path, 'main_validation.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % 
scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new\n )\n\n\ndef appliance_1024to512(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_train = np.load(os.path.join(base_path,\n 'appliance_train_1000.npy'))\n appliance_validation = np.load(os.path.join(base_path,\n 'appliance_validation_1000.npy'))\n appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy')\n )\n at_new = []\n av_new = []\n ae_new = []\n for i in range(len(appliance_train)):\n at_temp = []\n for j in range(256, 768):\n at_temp.append(float(appliance_train[i][j]))\n at_new.append(at_temp)\n for i in range(len(appliance_validation)):\n av_temp = []\n for j in range(256, 768):\n av_temp.append(float(appliance_validation[i][j]))\n av_new.append(av_temp)\n for i in range(len(appliance_test)):\n ae_temp = []\n for j in range(256, 768):\n ae_temp.append(float(appliance_test[i][j]))\n ae_new.append(ae_temp)\n np.save(os.path.join(base_path, 'appliance_train_512.npy'), at_new)\n np.save(os.path.join(base_path, 'appliance_validation_512.npy'), av_new)\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), ae_new)\n\n\ndef shrink_test(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))\n main_data = np.load(os.path.join(base_path, 'main_test.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_test_1000.npy'), 
appliance_new)\n np.save(os.path.join(base_path, 'main_test_1000.npy'), main_new)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef align_process(house_id):\n data = np.load('data\\\\REFIT\\\\original_data\\\\%d.npy' % house_id)\n new_data = []\n current_index = 0\n current_time = int(data[0][0])\n end_time = int(data[-1][0]) + 8\n interval_threshold = refit_cfg.separation_threshold\n isend = 0\n data_length = len(data)\n while current_time <= end_time:\n current_interval = int(data[current_index + 1][0]) - int(data[\n current_index][0])\n if current_interval < interval_threshold:\n if current_time > int(data[current_index][0]):\n temp_index = current_index + 1\n while current_time > int(data[temp_index][0]):\n temp_index += 1\n if temp_index > data_length - 1:\n temp_index -= 1\n break\n if abs(current_time - int(data[temp_index - 1][0])) > abs(\n int(data[temp_index][0]) - current_time):\n current_index = temp_index\n if temp_index == data_length - 1:\n print('The end!')\n isend = 1\n else:\n current_index = temp_index - 1\n t = []\n for element in data[current_index]:\n t.append(element)\n t[0] = current_time\n new_data.append(t)\n if isend == 1:\n break\n current_time += 8\n if current_index % 1000 == 0:\n print('House %d processing: %f' % (house_id, current_index /\n data_length))\n else:\n current_index += 1\n current_time = int(data[current_index][0])\n np.save('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id, new_data)\n\n\n<mask token>\n\n\ndef show_appliance(house_id, appliance_name):\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in 
_dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef appliance_separation(dict, appliance_name):\n \"\"\"\n 将各个电器的数据进行分解,放置到appliance_data文件夹下对应电器的文件夹中,以house_id和channel_id进行命名\n :param dict: 电器数据来源\n :param appliance_name: 当前电器的名称,用以创建文件夹\n :return:\n \"\"\"\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id + 1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)),\n appliance_data)\n print('Appliance %s House %d complete!' 
% (appliance_name, house_id))\n\n\n<mask token>\n\n\ndef cull(cull_dict):\n \"\"\"\n 根据画的图,将大段的空缺段进行删除,删除之后,需要进行比对\n :param cull_dict:\n :return:\n \"\"\"\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling_2\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' %\n (appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef separate(appliance_name):\n window_width = refit_cfg.window_width[appliance_name]\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n count = 0\n appliance_train_validation = []\n appliance_test = []\n main_train_validation = []\n main_test = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_train_validation.clear()\n main_train_validation.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(window_width * r)\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 
1000 == 0:\n print('T & V 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n random_clip = refit_cfg.random_clip[appliance_name]\n for i in range(random_clip):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 2: House %d %f' % (house_id, i / random_clip))\n print('Train & Validation: House %d %s complete!' % (house_id,\n appliance_name))\n np.save(os.path.join(data_path, \n '1024\\\\appliance_train_validation_%d.npy' % house_id),\n appliance_train_validation)\n np.save(os.path.join(data_path, \n '1024\\\\main_train_validation_%d.npy' % house_id),\n main_train_validation)\n count = 0\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_test.clear()\n main_test.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(r * window_width)\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n for i in range(refit_cfg.random_clip[appliance_name]):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_test.append(temp_appliance)\n 
main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 2: House %d %f' % (house_id, i / data_length))\n print('Test 2: House %d %s complete!' % (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_test_%d.npy' %\n house_id), appliance_test)\n np.save(os.path.join(data_path, '1024\\\\main_test_%d.npy' % house_id\n ), main_test)\n\n\n<mask token>\n\n\ndef train_validation_split(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = np.load(os.path.join(data_path,\n 'appliance_train_validation.npy'))\n main = np.load(os.path.join(data_path, 'main_train_validation.npy'))\n appliance_train, appliance_validation, main_train, main_validation = (\n train_test_split(appliance, main, test_size=0.2))\n print(len(appliance_train))\n print(len(main_train))\n np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)\n np.save(os.path.join(data_path, 'main_train.npy'), main_train)\n np.save(os.path.join(data_path, 'appliance_validation.npy'),\n appliance_validation)\n np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)\n\n\n<mask token>\n\n\ndef positive_negative(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n d = {}\n for i in range(len(threshold)):\n d[threshold[i]] = 0\n print(d)\n for th in threshold:\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > th:\n d[th] += 1\n print('Thres %d complete!' 
% th)\n for thres, count in d.items():\n print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data),\n count / len(appliance_data)))\n\n\ndef clip_view(appliance_name, thres):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > thres:\n plt.figure(figsize=(25, 10), dpi=100)\n plt.plot(i.astype(int))\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % count))\n plt.close()\n count += 1\n\n\ndef test_process(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))\n temp = [0.0] * 512\n new_app = []\n for i in range(len(appliance_data)):\n max = np.max(appliance_data[i])\n if max < 0.05:\n print(max)\n new_app.append(temp)\n else:\n new_app.append(appliance_data[i])\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)\n\n\ndef separate_positive_negative(appliance_name, thres, peak):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train.npy'))\n count = 0\n appliance_positive = []\n appliance_negative = []\n main_positive = []\n main_negative = []\n appliance_temp = [0] * 1024\n for i in range(len(appliance_data)):\n sum = 0\n max = 0\n for j in appliance_data[i]:\n sum += int(j)\n for j in range(512):\n if int(appliance_data[i][j + 256]) > max:\n max = int(appliance_data[i][j + 256])\n if max < peak:\n sum = 0\n if sum > thres:\n appliance_positive.append(appliance_data[i])\n main_positive.append(main_data[i])\n else:\n appliance_negative.append(appliance_temp)\n main_negative.append(main_data[i])\n if i % 1000 == 0:\n print('Processing: %f' % (i / len(appliance_data)))\n 
np.save(os.path.join(base_path, 'appliance_positive.npy'),\n appliance_positive)\n np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)\n np.save(os.path.join(base_path, 'appliance_negative.npy'),\n appliance_negative)\n np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)\n\n\n<mask token>\n\n\ndef shrink(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_train_balanced.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)\n\n\ndef shrink_validation(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_validation.npy'))\n main_data = np.load(os.path.join(base_path, 'main_validation.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % 
scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new\n )\n\n\ndef appliance_1024to512(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_train = np.load(os.path.join(base_path,\n 'appliance_train_1000.npy'))\n appliance_validation = np.load(os.path.join(base_path,\n 'appliance_validation_1000.npy'))\n appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy')\n )\n at_new = []\n av_new = []\n ae_new = []\n for i in range(len(appliance_train)):\n at_temp = []\n for j in range(256, 768):\n at_temp.append(float(appliance_train[i][j]))\n at_new.append(at_temp)\n for i in range(len(appliance_validation)):\n av_temp = []\n for j in range(256, 768):\n av_temp.append(float(appliance_validation[i][j]))\n av_new.append(av_temp)\n for i in range(len(appliance_test)):\n ae_temp = []\n for j in range(256, 768):\n ae_temp.append(float(appliance_test[i][j]))\n ae_new.append(ae_temp)\n np.save(os.path.join(base_path, 'appliance_train_512.npy'), at_new)\n np.save(os.path.join(base_path, 'appliance_validation_512.npy'), av_new)\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), ae_new)\n\n\ndef shrink_test(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))\n main_data = np.load(os.path.join(base_path, 'main_test.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_test_1000.npy'), 
appliance_new)\n np.save(os.path.join(base_path, 'main_test_1000.npy'), main_new)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef align_process(house_id):\n data = np.load('data\\\\REFIT\\\\original_data\\\\%d.npy' % house_id)\n new_data = []\n current_index = 0\n current_time = int(data[0][0])\n end_time = int(data[-1][0]) + 8\n interval_threshold = refit_cfg.separation_threshold\n isend = 0\n data_length = len(data)\n while current_time <= end_time:\n current_interval = int(data[current_index + 1][0]) - int(data[\n current_index][0])\n if current_interval < interval_threshold:\n if current_time > int(data[current_index][0]):\n temp_index = current_index + 1\n while current_time > int(data[temp_index][0]):\n temp_index += 1\n if temp_index > data_length - 1:\n temp_index -= 1\n break\n if abs(current_time - int(data[temp_index - 1][0])) > abs(\n int(data[temp_index][0]) - current_time):\n current_index = temp_index\n if temp_index == data_length - 1:\n print('The end!')\n isend = 1\n else:\n current_index = temp_index - 1\n t = []\n for element in data[current_index]:\n t.append(element)\n t[0] = current_time\n new_data.append(t)\n if isend == 1:\n break\n current_time += 8\n if current_index % 1000 == 0:\n print('House %d processing: %f' % (house_id, current_index /\n data_length))\n else:\n current_index += 1\n current_time = int(data[current_index][0])\n np.save('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id, new_data)\n\n\ndef visual(house_id, channel_id, start, length):\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n print(len(data))\n target = []\n c = channel_id + 1\n for r in data:\n target.append(int(r[c]))\n y = target[start:start + length]\n plt.plot(y)\n plt.show()\n\n\ndef diff(house_id):\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n d = []\n for i in range(len(data) - 1):\n d.append(int(data[i + 1][0]) - int(data[i][0]))\n plt.plot(d)\n plt.show()\n plt.close()\n\n\ndef appliance_separation(dict, appliance_name):\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not 
os.path.exists(path):\n os.mkdir(path)\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id + 1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)),\n appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' 
% (house_id, appliance_name))\n\n\ndef appliance_separation(dict, appliance_name):\n \"\"\"\n 将各个电器的数据进行分解,放置到appliance_data文件夹下对应电器的文件夹中,以house_id和channel_id进行命名\n :param dict: 电器数据来源\n :param appliance_name: 当前电器的名称,用以创建文件夹\n :return:\n \"\"\"\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id + 1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)),\n appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n \"\"\"\n 具体观察每个电器的图形表示,将大段的数据缺失或者数据错误进行标注,构造cull_dict字典,在cull进行片段删除\n :param house_id:\n :param appliance_name:\n :return:\n \"\"\"\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n \"\"\"\n 根据画的图,将大段的空缺段进行删除,删除之后,需要进行比对\n :param cull_dict:\n :return:\n \"\"\"\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling_2\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' %\n (appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - 
i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef separate(appliance_name):\n window_width = refit_cfg.window_width[appliance_name]\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n count = 0\n appliance_train_validation = []\n appliance_test = []\n main_train_validation = []\n main_test = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_train_validation.clear()\n main_train_validation.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(window_width * r)\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n random_clip = refit_cfg.random_clip[appliance_name]\n for i in range(random_clip):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 2: House %d %f' % (house_id, i / random_clip))\n print('Train & Validation: House %d %s complete!' 
% (house_id,\n appliance_name))\n np.save(os.path.join(data_path, \n '1024\\\\appliance_train_validation_%d.npy' % house_id),\n appliance_train_validation)\n np.save(os.path.join(data_path, \n '1024\\\\main_train_validation_%d.npy' % house_id),\n main_train_validation)\n count = 0\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_test.clear()\n main_test.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(r * window_width)\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n for i in range(refit_cfg.random_clip[appliance_name]):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 2: House %d %f' % (house_id, i / data_length))\n print('Test 2: House %d %s complete!' 
% (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_test_%d.npy' %\n house_id), appliance_test)\n np.save(os.path.join(data_path, '1024\\\\main_test_%d.npy' % house_id\n ), main_test)\n\n\ndef clip_visual(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train_.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_.npy'))\n print('Data load complete!')\n loop = 1000\n x = np.linspace(256, 768, 512)\n length = len(appliance_data)\n for i in range(loop):\n r = int(random.random() * length)\n plt.figure(figsize=(25, 10), dpi=100)\n plt.subplot(211)\n plt.xlim(0, 1024)\n plt.plot(main_data[r])\n plt.subplot(212)\n plt.xlim(0, 1024)\n plt.plot(x, appliance_data[r])\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % i))\n plt.close()\n\n\ndef train_validation_split(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = np.load(os.path.join(data_path,\n 'appliance_train_validation.npy'))\n main = np.load(os.path.join(data_path, 'main_train_validation.npy'))\n appliance_train, appliance_validation, main_train, main_validation = (\n train_test_split(appliance, main, test_size=0.2))\n print(len(appliance_train))\n print(len(main_train))\n np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)\n np.save(os.path.join(data_path, 'main_train.npy'), main_train)\n np.save(os.path.join(data_path, 'appliance_validation.npy'),\n appliance_validation)\n np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)\n\n\ndef data_integration(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = []\n main = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_data = np.load(os.path.join(data_path, \n 'appliance_train_validation_%d.npy' % house_id))\n main_data 
= np.load(os.path.join(data_path, \n 'main_train_validation_%d.npy' % house_id))\n for i in appliance_data:\n appliance.append(i)\n for i in main_data:\n main.append(i)\n print(len(appliance))\n print(len(main))\n np.save(os.path.join(data_path, 'appliance_train_validation.npy'),\n appliance)\n np.save(os.path.join(data_path, 'main_train_validation.npy'), main)\n appliance_test = []\n main_test = []\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_data = np.load(os.path.join(data_path, \n 'appliance_test_%d.npy' % house_id))\n main_data = np.load(os.path.join(data_path, 'main_test_%d.npy' %\n house_id))\n for i in appliance_data:\n appliance_test.append(i)\n for i in main_data:\n main_test.append(i)\n print(len(appliance_test))\n print(len(main_test))\n np.save(os.path.join(data_path, 'appliance_test.npy'), appliance_test)\n np.save(os.path.join(data_path, 'main_test.npy'), main_test)\n\n\ndef positive_negative(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n d = {}\n for i in range(len(threshold)):\n d[threshold[i]] = 0\n print(d)\n for th in threshold:\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > th:\n d[th] += 1\n print('Thres %d complete!' 
% th)\n for thres, count in d.items():\n print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data),\n count / len(appliance_data)))\n\n\ndef clip_view(appliance_name, thres):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > thres:\n plt.figure(figsize=(25, 10), dpi=100)\n plt.plot(i.astype(int))\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % count))\n plt.close()\n count += 1\n\n\ndef test_process(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))\n temp = [0.0] * 512\n new_app = []\n for i in range(len(appliance_data)):\n max = np.max(appliance_data[i])\n if max < 0.05:\n print(max)\n new_app.append(temp)\n else:\n new_app.append(appliance_data[i])\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)\n\n\ndef separate_positive_negative(appliance_name, thres, peak):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train.npy'))\n count = 0\n appliance_positive = []\n appliance_negative = []\n main_positive = []\n main_negative = []\n appliance_temp = [0] * 1024\n for i in range(len(appliance_data)):\n sum = 0\n max = 0\n for j in appliance_data[i]:\n sum += int(j)\n for j in range(512):\n if int(appliance_data[i][j + 256]) > max:\n max = int(appliance_data[i][j + 256])\n if max < peak:\n sum = 0\n if sum > thres:\n appliance_positive.append(appliance_data[i])\n main_positive.append(main_data[i])\n else:\n appliance_negative.append(appliance_temp)\n main_negative.append(main_data[i])\n if i % 1000 == 0:\n print('Processing: %f' % (i / len(appliance_data)))\n 
np.save(os.path.join(base_path, 'appliance_positive.npy'),\n appliance_positive)\n np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)\n np.save(os.path.join(base_path, 'appliance_negative.npy'),\n appliance_negative)\n np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)\n\n\ndef generate_balanced_dataset(appliance_name, negative_ratio):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_positive = list(np.load(os.path.join(base_path,\n 'appliance_positive.npy')))\n appliance_negative = np.load(os.path.join(base_path,\n 'appliance_negative.npy'))\n main_positive = list(np.load(os.path.join(base_path, 'main_positive.npy')))\n main_negative = np.load(os.path.join(base_path, 'main_negative.npy'))\n print('Data load complete!')\n positive_length = len(appliance_positive)\n negative_length = len(appliance_negative)\n print('Postive length: %d negative length: %d' % (positive_length,\n negative_length))\n for i in range(int(positive_length * negative_ratio)):\n r = int(random.random() * negative_length)\n appliance_positive.append(appliance_negative[r])\n main_positive.append(main_negative[r])\n print('Data generate complete! 
length: %d' % len(appliance_positive))\n index = np.linspace(0, len(appliance_positive) - 1, len(appliance_positive)\n ).astype(int)\n random.shuffle(index)\n appliance_new = []\n main_new = []\n for i in index:\n appliance_new.append(appliance_positive[i])\n main_new.append(main_positive[i])\n print('Data shuffle complete!')\n np.save(os.path.join(base_path, 'appliance_train_balanced.npy'),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_balanced.npy'), main_new)\n print('Data save complete!')\n\n\ndef shrink(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_train_balanced.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)\n\n\ndef shrink_validation(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_validation.npy'))\n main_data = np.load(os.path.join(base_path, 'main_validation.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new\n )\n\n\ndef appliance_1024to512(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_train = np.load(os.path.join(base_path,\n 'appliance_train_1000.npy'))\n appliance_validation = np.load(os.path.join(base_path,\n 'appliance_validation_1000.npy'))\n appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy')\n )\n at_new = []\n av_new = []\n ae_new = []\n for i in range(len(appliance_train)):\n at_temp = []\n for j in range(256, 768):\n at_temp.append(float(appliance_train[i][j]))\n at_new.append(at_temp)\n for i in range(len(appliance_validation)):\n av_temp = []\n for j in range(256, 768):\n av_temp.append(float(appliance_validation[i][j]))\n av_new.append(av_temp)\n for i in range(len(appliance_test)):\n ae_temp = []\n for j in range(256, 768):\n ae_temp.append(float(appliance_test[i][j]))\n ae_new.append(ae_temp)\n np.save(os.path.join(base_path, 'appliance_train_512.npy'), at_new)\n np.save(os.path.join(base_path, 'appliance_validation_512.npy'), av_new)\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), ae_new)\n\n\ndef shrink_test(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))\n main_data = np.load(os.path.join(base_path, 'main_test.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_test_1000.npy'), appliance_new)\n np.save(os.path.join(base_path, 'main_test_1000.npy'), main_new)\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.append('preprocess')\n<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\n\n\ndef align_process(house_id):\n data = np.load('data\\\\REFIT\\\\original_data\\\\%d.npy' % house_id)\n new_data = []\n current_index = 0\n current_time = int(data[0][0])\n end_time = int(data[-1][0]) + 8\n interval_threshold = refit_cfg.separation_threshold\n isend = 0\n data_length = len(data)\n while current_time <= end_time:\n current_interval = int(data[current_index + 1][0]) - int(data[\n current_index][0])\n if current_interval < interval_threshold:\n if current_time > int(data[current_index][0]):\n temp_index = current_index + 1\n while current_time > int(data[temp_index][0]):\n temp_index += 1\n if temp_index > data_length - 1:\n temp_index -= 1\n break\n if abs(current_time - int(data[temp_index - 1][0])) > abs(\n int(data[temp_index][0]) - current_time):\n current_index = temp_index\n if temp_index == data_length - 1:\n print('The end!')\n isend = 1\n else:\n current_index = temp_index - 1\n t = []\n for element in data[current_index]:\n t.append(element)\n t[0] = current_time\n new_data.append(t)\n if isend == 1:\n break\n current_time += 8\n if current_index % 1000 == 0:\n print('House %d processing: %f' % (house_id, current_index /\n data_length))\n else:\n current_index += 1\n current_time = int(data[current_index][0])\n np.save('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id, new_data)\n\n\ndef visual(house_id, channel_id, start, length):\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n print(len(data))\n target = []\n c = channel_id + 1\n for r in data:\n target.append(int(r[c]))\n y = target[start:start + length]\n plt.plot(y)\n plt.show()\n\n\ndef diff(house_id):\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n d = []\n for i in range(len(data) - 1):\n d.append(int(data[i + 1][0]) - int(data[i][0]))\n plt.plot(d)\n plt.show()\n plt.close()\n\n\ndef appliance_separation(dict, 
appliance_name):\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id + 1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)),\n appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' 
% (house_id, appliance_name))\n\n\ndef appliance_separation(dict, appliance_name):\n \"\"\"\n 将各个电器的数据进行分解,放置到appliance_data文件夹下对应电器的文件夹中,以house_id和channel_id进行命名\n :param dict: 电器数据来源\n :param appliance_name: 当前电器的名称,用以创建文件夹\n :return:\n \"\"\"\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id + 1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)),\n appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n \"\"\"\n 具体观察每个电器的图形表示,将大段的数据缺失或者数据错误进行标注,构造cull_dict字典,在cull进行片段删除\n :param house_id:\n :param appliance_name:\n :return:\n \"\"\"\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n \"\"\"\n 根据画的图,将大段的空缺段进行删除,删除之后,需要进行比对\n :param cull_dict:\n :return:\n \"\"\"\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling_2\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' %\n (appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list) - 1):\n _cull_list.append([cull_list[i][1], cull_list[i + 1][0]])\n _cull_list.append([cull_list[-1][1], len(data) - 1])\n for i in _cull_list:\n if i[1] - 
i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (\n appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef separate(appliance_name):\n window_width = refit_cfg.window_width[appliance_name]\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n count = 0\n appliance_train_validation = []\n appliance_test = []\n main_train_validation = []\n main_test = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_train_validation.clear()\n main_train_validation.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(window_width * r)\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n random_clip = refit_cfg.random_clip[appliance_name]\n for i in range(random_clip):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 2: House %d %f' % (house_id, i / random_clip))\n print('Train & Validation: House %d %s complete!' 
% (house_id,\n appliance_name))\n np.save(os.path.join(data_path, \n '1024\\\\appliance_train_validation_%d.npy' % house_id),\n appliance_train_validation)\n np.save(os.path.join(data_path, \n '1024\\\\main_train_validation_%d.npy' % house_id),\n main_train_validation)\n count = 0\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_test.clear()\n main_test.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id,\n channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head + window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(r * window_width)\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 1: House %d %f' % (house_id, current_head /\n data_length))\n data_length -= window_width\n for i in range(refit_cfg.random_clip[appliance_name]):\n r = random.random()\n start = int(r * data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 2: House %d %f' % (house_id, i / data_length))\n print('Test 2: House %d %s complete!' 
% (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_test_%d.npy' %\n house_id), appliance_test)\n np.save(os.path.join(data_path, '1024\\\\main_test_%d.npy' % house_id\n ), main_test)\n\n\ndef clip_visual(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train_.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_.npy'))\n print('Data load complete!')\n loop = 1000\n x = np.linspace(256, 768, 512)\n length = len(appliance_data)\n for i in range(loop):\n r = int(random.random() * length)\n plt.figure(figsize=(25, 10), dpi=100)\n plt.subplot(211)\n plt.xlim(0, 1024)\n plt.plot(main_data[r])\n plt.subplot(212)\n plt.xlim(0, 1024)\n plt.plot(x, appliance_data[r])\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % i))\n plt.close()\n\n\ndef train_validation_split(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = np.load(os.path.join(data_path,\n 'appliance_train_validation.npy'))\n main = np.load(os.path.join(data_path, 'main_train_validation.npy'))\n appliance_train, appliance_validation, main_train, main_validation = (\n train_test_split(appliance, main, test_size=0.2))\n print(len(appliance_train))\n print(len(main_train))\n np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)\n np.save(os.path.join(data_path, 'main_train.npy'), main_train)\n np.save(os.path.join(data_path, 'appliance_validation.npy'),\n appliance_validation)\n np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)\n\n\ndef data_integration(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = []\n main = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name\n ].items():\n appliance_data = np.load(os.path.join(data_path, \n 'appliance_train_validation_%d.npy' % house_id))\n main_data 
= np.load(os.path.join(data_path, \n 'main_train_validation_%d.npy' % house_id))\n for i in appliance_data:\n appliance.append(i)\n for i in main_data:\n main.append(i)\n print(len(appliance))\n print(len(main))\n np.save(os.path.join(data_path, 'appliance_train_validation.npy'),\n appliance)\n np.save(os.path.join(data_path, 'main_train_validation.npy'), main)\n appliance_test = []\n main_test = []\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_data = np.load(os.path.join(data_path, \n 'appliance_test_%d.npy' % house_id))\n main_data = np.load(os.path.join(data_path, 'main_test_%d.npy' %\n house_id))\n for i in appliance_data:\n appliance_test.append(i)\n for i in main_data:\n main_test.append(i)\n print(len(appliance_test))\n print(len(main_test))\n np.save(os.path.join(data_path, 'appliance_test.npy'), appliance_test)\n np.save(os.path.join(data_path, 'main_test.npy'), main_test)\n\n\ndef positive_negative(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n d = {}\n for i in range(len(threshold)):\n d[threshold[i]] = 0\n print(d)\n for th in threshold:\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > th:\n d[th] += 1\n print('Thres %d complete!' 
% th)\n for thres, count in d.items():\n print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data),\n count / len(appliance_data)))\n\n\ndef clip_view(appliance_name, thres):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > thres:\n plt.figure(figsize=(25, 10), dpi=100)\n plt.plot(i.astype(int))\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % count))\n plt.close()\n count += 1\n\n\ndef test_process(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))\n temp = [0.0] * 512\n new_app = []\n for i in range(len(appliance_data)):\n max = np.max(appliance_data[i])\n if max < 0.05:\n print(max)\n new_app.append(temp)\n else:\n new_app.append(appliance_data[i])\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)\n\n\ndef separate_positive_negative(appliance_name, thres, peak):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train.npy'))\n count = 0\n appliance_positive = []\n appliance_negative = []\n main_positive = []\n main_negative = []\n appliance_temp = [0] * 1024\n for i in range(len(appliance_data)):\n sum = 0\n max = 0\n for j in appliance_data[i]:\n sum += int(j)\n for j in range(512):\n if int(appliance_data[i][j + 256]) > max:\n max = int(appliance_data[i][j + 256])\n if max < peak:\n sum = 0\n if sum > thres:\n appliance_positive.append(appliance_data[i])\n main_positive.append(main_data[i])\n else:\n appliance_negative.append(appliance_temp)\n main_negative.append(main_data[i])\n if i % 1000 == 0:\n print('Processing: %f' % (i / len(appliance_data)))\n 
np.save(os.path.join(base_path, 'appliance_positive.npy'),\n appliance_positive)\n np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)\n np.save(os.path.join(base_path, 'appliance_negative.npy'),\n appliance_negative)\n np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)\n\n\ndef generate_balanced_dataset(appliance_name, negative_ratio):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_positive = list(np.load(os.path.join(base_path,\n 'appliance_positive.npy')))\n appliance_negative = np.load(os.path.join(base_path,\n 'appliance_negative.npy'))\n main_positive = list(np.load(os.path.join(base_path, 'main_positive.npy')))\n main_negative = np.load(os.path.join(base_path, 'main_negative.npy'))\n print('Data load complete!')\n positive_length = len(appliance_positive)\n negative_length = len(appliance_negative)\n print('Postive length: %d negative length: %d' % (positive_length,\n negative_length))\n for i in range(int(positive_length * negative_ratio)):\n r = int(random.random() * negative_length)\n appliance_positive.append(appliance_negative[r])\n main_positive.append(main_negative[r])\n print('Data generate complete! 
length: %d' % len(appliance_positive))\n index = np.linspace(0, len(appliance_positive) - 1, len(appliance_positive)\n ).astype(int)\n random.shuffle(index)\n appliance_new = []\n main_new = []\n for i in index:\n appliance_new.append(appliance_positive[i])\n main_new.append(main_positive[i])\n print('Data shuffle complete!')\n np.save(os.path.join(base_path, 'appliance_train_balanced.npy'),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_balanced.npy'), main_new)\n print('Data save complete!')\n\n\ndef shrink(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_train_balanced.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)\n\n\ndef shrink_validation(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path,\n 'appliance_validation.npy'))\n main_data = np.load(os.path.join(base_path, 'main_validation.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % scale),\n appliance_new)\n np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new\n )\n\n\ndef appliance_1024to512(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_train = np.load(os.path.join(base_path,\n 'appliance_train_1000.npy'))\n appliance_validation = np.load(os.path.join(base_path,\n 'appliance_validation_1000.npy'))\n appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy')\n )\n at_new = []\n av_new = []\n ae_new = []\n for i in range(len(appliance_train)):\n at_temp = []\n for j in range(256, 768):\n at_temp.append(float(appliance_train[i][j]))\n at_new.append(at_temp)\n for i in range(len(appliance_validation)):\n av_temp = []\n for j in range(256, 768):\n av_temp.append(float(appliance_validation[i][j]))\n av_new.append(av_temp)\n for i in range(len(appliance_test)):\n ae_temp = []\n for j in range(256, 768):\n ae_temp.append(float(appliance_test[i][j]))\n ae_new.append(ae_temp)\n np.save(os.path.join(base_path, 'appliance_train_512.npy'), at_new)\n np.save(os.path.join(base_path, 'appliance_validation_512.npy'), av_new)\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), ae_new)\n\n\ndef shrink_test(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))\n main_data = np.load(os.path.join(base_path, 'main_test.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j]) / scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j]) / scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n np.save(os.path.join(base_path, 'appliance_test_1000.npy'), appliance_new)\n np.save(os.path.join(base_path, 'main_test_1000.npy'), main_new)\n\n\nif __name__ == '__main__':\n appliance_name = 'WashingMachine'\n separate(appliance_name)\n data_integration(appliance_name)\n train_validation_split(appliance_name)\n separate_positive_negative(appliance_name, 1500, 20)\n generate_balanced_dataset(appliance_name, 1)\n shrink(appliance_name, 1000)\n shrink_validation(appliance_name, 1000)\n shrink_test(appliance_name, 1000)\n appliance_1024to512(appliance_name)\n print('Process complete!!!')\n",
"step-5": "import sys\nsys.path.append('preprocess')\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import savefig\nimport numpy as np\nimport refit_cfg\nimport os\nimport random\nfrom sklearn.model_selection import train_test_split\n\n\nname = ['WashingMachine', 'Kettle', 'Microwave', 'Fridge', 'Dishwasher']\nappliance_dict = {\n 'WashingMachine': refit_cfg.washingmachine,\n 'Kettle': refit_cfg.kettle,\n 'Microwave': refit_cfg.microwave,\n 'Fridge': refit_cfg.fridge,\n 'Dishwasher': refit_cfg.dishwasher\n}\n\n\ndef align_process(house_id):\n data = np.load('data\\\\REFIT\\\\original_data\\\\%d.npy' % house_id)\n new_data = []\n current_index = 0\n current_time = int(data[0][0])\n end_time = int(data[-1][0]) + 8\n interval_threshold = refit_cfg.separation_threshold\n isend = 0\n data_length = len(data)\n\n while current_time <= end_time:\n current_interval = int(data[current_index+1][0]) - int(data[current_index][0])\n if current_interval < interval_threshold: # small interval\n if current_time > int(data[current_index][0]):\n temp_index = current_index + 1\n while current_time > int(data[temp_index][0]):\n temp_index += 1\n if temp_index > (data_length-1):\n temp_index -= 1\n break\n\n if abs(current_time - int(data[temp_index-1][0])) > abs(int(data[temp_index][0])-current_time):\n current_index = temp_index\n if temp_index == (data_length-1):\n print('The end!')\n isend = 1\n else:\n current_index = temp_index - 1\n t = []\n for element in data[current_index]:\n t.append(element)\n t[0] = current_time\n new_data.append(t)\n if isend == 1:\n break\n current_time += 8\n if current_index % 1000 == 0:\n print('House %d processing: %f' % (house_id, current_index/data_length))\n else: # big interval\n current_index += 1\n current_time = int(data[current_index][0])\n\n np.save('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id, new_data)\n\n\ndef visual(house_id, channel_id, start, length):\n data = 
np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n print(len(data))\n target = []\n c = channel_id+1\n for r in data:\n target.append(int(r[c]))\n y = target[start:start+length]\n plt.plot(y)\n plt.show()\n\n\ndef diff(house_id):\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n d = []\n for i in range(len(data)-1):\n d.append(int(data[i+1][0])-int(data[i][0]))\n plt.plot(d)\n plt.show()\n plt.close()\n\n\ndef appliance_separation(dict, appliance_name):\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id+1]])\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)), appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_align\\\\%s\\\\%d_%d.npy' % (appliance_name, house_id, channel_id))\n new_data = []\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list)-1):\n _cull_list.append([cull_list[i][1], cull_list[i+1][0]])\n _cull_list.append([cull_list[-1][1], (len(data)-1)])\n\n for i in 
_cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef appliance_separation(dict, appliance_name):\n \"\"\"\n 将各个电器的数据进行分解,放置到appliance_data文件夹下对应电器的文件夹中,以house_id和channel_id进行命名\n :param dict: 电器数据来源\n :param appliance_name: 当前电器的名称,用以创建文件夹\n :return:\n \"\"\"\n path = 'data\\\\REFIT\\\\appliance_data\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n\n for house_id, channel_id in dict.items():\n data = np.load('data\\\\REFIT\\\\after_align\\\\%d.npy' % house_id)\n appliance_data = []\n for row in data:\n appliance_data.append([row[1], row[channel_id+1]]) # 将mains 和 appliance 作为一条单独的记录\n np.save(os.path.join(path, '%d_%d.npy' % (house_id, channel_id)), appliance_data)\n print('Appliance %s House %d complete!' % (appliance_name, house_id))\n\n\ndef show_appliance(house_id, appliance_name):\n \"\"\"\n 具体观察每个电器的图形表示,将大段的数据缺失或者数据错误进行标注,构造cull_dict字典,在cull进行片段删除\n :param house_id:\n :param appliance_name:\n :return:\n \"\"\"\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling\\\\%s\\\\%d_%d.npy' % (appliance_name, house_id, channel_id))\n print(len(data))\n mains = []\n app = []\n for i in data:\n mains.append(int(i[0]))\n app.append(int(i[1]))\n plt.figure(figsize=(20, 8))\n plt.plot(mains)\n plt.plot(app)\n plt.show()\n\n\ndef cull(cull_dict):\n \"\"\"\n 根据画的图,将大段的空缺段进行删除,删除之后,需要进行比对\n :param cull_dict:\n :return:\n \"\"\"\n for appliance_name, _dict in cull_dict.items():\n path = 'data\\\\REFIT\\\\after_culling_2\\\\%s' % appliance_name\n if not os.path.exists(path):\n os.mkdir(path)\n for house_id, cull_list in _dict.items():\n channel_id = appliance_dict[appliance_name][house_id]\n data = np.load('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (appliance_name, 
house_id, channel_id))\n new_data = []\n # 对cull_list进行变形,变成表征合理数据的区间\n _cull_list = [[0, cull_list[0][0]]]\n for i in range(len(cull_list)-1):\n _cull_list.append([cull_list[i][1], cull_list[i+1][0]])\n _cull_list.append([cull_list[-1][1], (len(data)-1)])\n\n for i in _cull_list:\n if i[1] - i[0] != 0:\n for j in range(i[0], i[1]):\n new_data.append(data[j])\n np.save('data\\\\REFIT\\\\after_culling_2\\\\%s\\\\%d_%d.npy' % (appliance_name, house_id, channel_id), new_data)\n print('House %d %s complete!' % (house_id, appliance_name))\n\n\ndef separate(appliance_name):\n window_width = refit_cfg.window_width[appliance_name]\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n count = 0\n appliance_train_validation = []\n appliance_test = []\n main_train_validation = []\n main_test = []\n\n for house_id, channel_id in refit_cfg.train_validation[appliance_name].items():\n # train & validation\n appliance_train_validation.clear()\n main_train_validation.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head+window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(window_width*r)\n appliance_train_validation.append(temp_appliance)\n main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 1: House %d %f' % (house_id, (current_head / data_length)))\n\n data_length -= window_width\n random_clip = refit_cfg.random_clip[appliance_name]\n for i in range(random_clip):\n r = random.random()\n start = int(r*data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_train_validation.append(temp_appliance)\n 
main_train_validation.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('T & V 2: House %d %f' % (house_id, (i / random_clip)))\n print('Train & Validation: House %d %s complete!' % (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_train_validation_%d.npy' % house_id), appliance_train_validation)\n np.save(os.path.join(data_path, '1024\\\\main_train_validation_%d.npy' % house_id), main_train_validation)\n\n\n # test\n count = 0\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_test.clear()\n main_test.clear()\n data = np.load(os.path.join(data_path, '%s_%s.npy' % (house_id, channel_id)))\n current_head = 0\n data_length = len(data)\n end = data_length - window_width - 1\n while current_head < end:\n temp_main = []\n temp_appliance = []\n for i in range(current_head, current_head+window_width):\n temp_main.append(data[i][0])\n temp_appliance.append(data[i][1])\n r = random.random()\n current_head += int(r*window_width)\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 1: House %d %f' % (house_id, (current_head / data_length)))\n\n data_length -= window_width\n for i in range(refit_cfg.random_clip[appliance_name]):\n r = random.random()\n start = int(r*data_length)\n temp_main = []\n temp_appliance = []\n for j in range(start, start + window_width):\n temp_main.append(data[j][0])\n temp_appliance.append(data[j][1])\n appliance_test.append(temp_appliance)\n main_test.append(temp_main)\n count += 1\n if count % 1000 == 0:\n print('Test 2: House %d %f' % (house_id, (i / data_length)))\n print('Test 2: House %d %s complete!' 
% (house_id, appliance_name))\n np.save(os.path.join(data_path, '1024\\\\appliance_test_%d.npy' % house_id), appliance_test)\n np.save(os.path.join(data_path, '1024\\\\main_test_%d.npy' % house_id), main_test)\n\n\ndef clip_visual(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train_.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_.npy'))\n print('Data load complete!')\n loop = 1000\n x = np.linspace(256, 768, 512)\n length = len(appliance_data)\n for i in range(loop):\n r = int(random.random()*length)\n plt.figure(figsize=(25, 10), dpi=100)\n plt.subplot(211)\n plt.xlim(0, 1024)\n plt.plot(main_data[r])\n plt.subplot(212)\n plt.xlim(0, 1024)\n plt.plot(x, appliance_data[r])\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % i))\n plt.close()\n\n\ndef train_validation_split(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = np.load(os.path.join(data_path, 'appliance_train_validation.npy'))\n main = np.load(os.path.join(data_path, 'main_train_validation.npy'))\n appliance_train, appliance_validation, main_train, main_validation = \\\n train_test_split(appliance, main, test_size=0.2)\n print(len(appliance_train))\n print(len(main_train))\n\n np.save(os.path.join(data_path, 'appliance_train.npy'), appliance_train)\n np.save(os.path.join(data_path, 'main_train.npy'), main_train)\n np.save(os.path.join(data_path, 'appliance_validation.npy'), appliance_validation)\n np.save(os.path.join(data_path, 'main_validation.npy'), main_validation)\n\n\ndef data_integration(appliance_name):\n data_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance = []\n main = []\n for house_id, channel_id in refit_cfg.train_validation[appliance_name].items():\n appliance_data = np.load(os.path.join(data_path, 'appliance_train_validation_%d.npy' % house_id))\n main_data = 
np.load(os.path.join(data_path, 'main_train_validation_%d.npy' % house_id))\n for i in appliance_data:\n appliance.append(i)\n for i in main_data:\n main.append(i)\n\n print(len(appliance))\n print(len(main))\n np.save(os.path.join(data_path, 'appliance_train_validation.npy'), appliance)\n np.save(os.path.join(data_path, 'main_train_validation.npy'), main)\n\n appliance_test = []\n main_test = []\n for house_id, channel_id in refit_cfg.test[appliance_name].items():\n appliance_data = np.load(os.path.join(data_path, 'appliance_test_%d.npy' % house_id))\n main_data = np.load(os.path.join(data_path, 'main_test_%d.npy' % house_id))\n for i in appliance_data:\n appliance_test.append(i)\n for i in main_data:\n main_test.append(i)\n\n print(len(appliance_test))\n print(len(main_test))\n np.save(os.path.join(data_path, 'appliance_test.npy'), appliance_test)\n np.save(os.path.join(data_path, 'main_test.npy'), main_test)\n\n\ndef positive_negative(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n threshold = [0, 50, 100, 200, 500, 1000, 2000, 5000, 10000]\n d = {}\n for i in range(len(threshold)):\n d[threshold[i]] = 0\n print(d)\n\n for th in threshold:\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > th:\n d[th] += 1\n print('Thres %d complete!' 
% th)\n\n for thres, count in d.items():\n print('Thres: %d %d/%d %f' % (thres, count, len(appliance_data), count/len(appliance_data)))\n\n\ndef clip_view(appliance_name, thres):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n count = 0\n\n for i in appliance_data:\n sum = 0\n for j in i:\n sum += int(j)\n if sum > thres:\n plt.figure(figsize=(25, 10), dpi=100)\n plt.plot(i.astype(int))\n savefig(os.path.join(base_path, 'clip_view\\\\%d.jpg' % count))\n plt.close()\n count += 1\n\n\ndef test_process(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test_512.npy'))\n temp = [0.0]*512\n new_app = []\n for i in range(len(appliance_data)):\n max = np.max(appliance_data[i])\n if max < 0.05:\n print(max)\n new_app.append(temp)\n else:\n new_app.append(appliance_data[i])\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), new_app)\n\n\ndef separate_positive_negative(appliance_name, thres, peak):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train.npy'))\n count = 0\n appliance_positive = []\n appliance_negative = []\n main_positive = []\n main_negative = []\n appliance_temp = [0] * 1024\n\n for i in range(len(appliance_data)):\n sum = 0\n max = 0\n for j in appliance_data[i]:\n sum += int(j)\n for j in range(512):\n if int(appliance_data[i][j+256]) > max:\n max = int(appliance_data[i][j+256])\n if max < peak:\n sum = 0\n if sum > thres:\n appliance_positive.append(appliance_data[i])\n main_positive.append(main_data[i])\n else:\n appliance_negative.append(appliance_temp)\n main_negative.append(main_data[i])\n if i % 1000 == 0:\n print('Processing: %f' % (i/len(appliance_data)))\n\n 
np.save(os.path.join(base_path, 'appliance_positive.npy'), appliance_positive)\n np.save(os.path.join(base_path, 'main_positive.npy'), main_positive)\n np.save(os.path.join(base_path, 'appliance_negative.npy'), appliance_negative)\n np.save(os.path.join(base_path, 'main_negative.npy'), main_negative)\n\n\ndef generate_balanced_dataset(appliance_name, negative_ratio):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_positive = list(np.load(os.path.join(base_path, 'appliance_positive.npy')))\n appliance_negative = np.load(os.path.join(base_path, 'appliance_negative.npy'))\n main_positive = list(np.load(os.path.join(base_path, 'main_positive.npy')))\n main_negative = np.load(os.path.join(base_path, 'main_negative.npy'))\n print('Data load complete!')\n\n positive_length = len(appliance_positive)\n negative_length = len(appliance_negative)\n print('Postive length: %d negative length: %d' % (positive_length, negative_length))\n for i in range(int(positive_length*negative_ratio)):\n r = int(random.random()*negative_length)\n appliance_positive.append(appliance_negative[r])\n main_positive.append(main_negative[r])\n print('Data generate complete! 
length: %d' % (len(appliance_positive)))\n\n index = np.linspace(0, len(appliance_positive)-1, len(appliance_positive)).astype(int)\n random.shuffle(index)\n appliance_new = []\n main_new = []\n\n for i in index:\n appliance_new.append(appliance_positive[i])\n main_new.append(main_positive[i])\n print('Data shuffle complete!')\n\n np.save(os.path.join(base_path, 'appliance_train_balanced.npy'), appliance_new)\n np.save(os.path.join(base_path, 'main_train_balanced.npy'), main_new)\n print('Data save complete!')\n\n\ndef shrink(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_train_balanced.npy'))\n main_data = np.load(os.path.join(base_path, 'main_train_balanced.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j])/scale))\n for j in range(len(main_data[i])):\n main_temp.append(float(int(main_data[i][j])/scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n\n np.save(os.path.join(base_path, 'appliance_train_%d.npy' % scale), appliance_new)\n np.save(os.path.join(base_path, 'main_train_%d.npy' % scale), main_new)\n\n\ndef shrink_validation(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_validation.npy'))\n main_data = np.load(os.path.join(base_path, 'main_validation.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j])/scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j])/scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n\n np.save(os.path.join(base_path, 'appliance_validation_%d.npy' % scale), appliance_new)\n np.save(os.path.join(base_path, 'main_validation_%d.npy' % scale), main_new)\n\n\ndef appliance_1024to512(appliance_name):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_train = np.load(os.path.join(base_path, 'appliance_train_1000.npy'))\n appliance_validation = np.load(os.path.join(base_path, 'appliance_validation_1000.npy'))\n appliance_test = np.load(os.path.join(base_path, 'appliance_test_1000.npy'))\n at_new = []\n av_new = []\n ae_new = []\n\n for i in range(len(appliance_train)):\n at_temp = []\n for j in range(256, 768):\n at_temp.append(float(appliance_train[i][j]))\n at_new.append(at_temp)\n for i in range(len(appliance_validation)):\n av_temp = []\n for j in range(256, 768):\n av_temp.append(float(appliance_validation[i][j]))\n av_new.append(av_temp)\n for i in range(len(appliance_test)):\n ae_temp = []\n for j in range(256, 768):\n ae_temp.append(float(appliance_test[i][j]))\n ae_new.append(ae_temp)\n\n np.save(os.path.join(base_path, 'appliance_train_512.npy'), at_new)\n np.save(os.path.join(base_path, 'appliance_validation_512.npy'), av_new)\n np.save(os.path.join(base_path, 'appliance_test_512.npy'), ae_new)\n\n\ndef shrink_test(appliance_name, scale):\n base_path = 'data\\\\REFIT\\\\after_culling\\\\%s\\\\1024' % appliance_name\n appliance_data = np.load(os.path.join(base_path, 'appliance_test.npy'))\n main_data = np.load(os.path.join(base_path, 'main_test.npy'))\n appliance_new = []\n main_new = []\n print('Data load complete!')\n\n for i in range(len(appliance_data)):\n appliance_temp = []\n main_temp = []\n for j in range(len(appliance_data[i])):\n appliance_temp.append(float(int(appliance_data[i][j])/scale))\n for j in range(len(main_data[i])):\n 
main_temp.append(float(int(main_data[i][j])/scale))\n appliance_new.append(appliance_temp)\n main_new.append(main_temp)\n print('Process complete!')\n\n np.save(os.path.join(base_path, 'appliance_test_1000.npy'), appliance_new)\n np.save(os.path.join(base_path, 'main_test_1000.npy'), main_new)\n\n\nif __name__ == '__main__':\n appliance_name = 'WashingMachine'\n separate(appliance_name)\n data_integration(appliance_name)\n train_validation_split(appliance_name)\n separate_positive_negative(appliance_name, 1500, 20)\n generate_balanced_dataset(appliance_name, 1)\n shrink(appliance_name, 1000)\n shrink_validation(appliance_name, 1000)\n shrink_test(appliance_name, 1000)\n appliance_1024to512(appliance_name)\n # test_process(appliance_name)\n print('Process complete!!!')\n",
"step-ids": [
14,
15,
22,
23,
26
]
}
|
[
14,
15,
22,
23,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Leave)
admin.site.register(EmployeeProfile)
<|reserved_special_token_1|>
from django.contrib import admin
from employees.models import Leave, EmployeeProfile
admin.site.register(Leave)
admin.site.register(EmployeeProfile)
<|reserved_special_token_1|>
from django.contrib import admin
from employees.models import Leave,EmployeeProfile
admin.site.register(Leave)
admin.site.register(EmployeeProfile)
# Register your models here.
|
flexible
|
{
"blob_id": "77ea670b537e9ff7082aeb9ed54b011fa8e3a035",
"index": 6328,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Leave)\nadmin.site.register(EmployeeProfile)\n",
"step-3": "from django.contrib import admin\nfrom employees.models import Leave, EmployeeProfile\nadmin.site.register(Leave)\nadmin.site.register(EmployeeProfile)\n",
"step-4": "from django.contrib import admin\r\nfrom employees.models import Leave,EmployeeProfile\r\n\r\nadmin.site.register(Leave)\r\nadmin.site.register(EmployeeProfile)\r\n# Register your models here.\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class testsolution(TestCase):
def setUp(self):
self.solution = Solution()
self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,
64, 320, 40, 160])]
def test_productExceptSelf(self):
for p1, p2 in self.inout:
with self.subTest(input=p1, expected=p2):
self.assertEqual(self.solution.productExceptSelf(p1), p2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
class testsolution(TestCase):
def setUp(self):
self.solution = Solution()
self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,
64, 320, 40, 160])]
def test_productExceptSelf(self):
for p1, p2 in self.inout:
with self.subTest(input=p1, expected=p2):
self.assertEqual(self.solution.productExceptSelf(p1), p2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def productExceptSelf(self, nums):
right, rs = 1, [1] * len(nums)
for i in range(1, len(nums)):
rs[i] = nums[i - 1] * rs[i - 1]
for i in range(len(nums) - 1, -1, -1):
rs[i], right = rs[i] * right, right * nums[i]
return rs
class testsolution(TestCase):
def setUp(self):
self.solution = Solution()
self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,
64, 320, 40, 160])]
def test_productExceptSelf(self):
for p1, p2 in self.inout:
with self.subTest(input=p1, expected=p2):
self.assertEqual(self.solution.productExceptSelf(p1), p2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def productExceptSelf(self, nums):
right, rs = 1, [1] * len(nums)
for i in range(1, len(nums)):
rs[i] = nums[i - 1] * rs[i - 1]
for i in range(len(nums) - 1, -1, -1):
rs[i], right = rs[i] * right, right * nums[i]
return rs
class testsolution(TestCase):
def setUp(self):
self.solution = Solution()
self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,
64, 320, 40, 160])]
def test_productExceptSelf(self):
for p1, p2 in self.inout:
with self.subTest(input=p1, expected=p2):
self.assertEqual(self.solution.productExceptSelf(p1), p2)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from unittest import TestCase, main
class Solution:
def productExceptSelf(self, nums):
right, rs = 1, [1]*len(nums)
for i in range(1,len(nums)): rs[i] = nums[i-1]*rs[i-1]
for i in range(len(nums)-1, -1, -1): rs[i], right = rs[i]*right, right*nums[i]
return rs
class testsolution(TestCase):
    """Checks Solution.productExceptSelf against hand-computed fixtures."""

    def setUp(self):
        # Fresh solver plus (input, expected) fixture pairs for each test.
        self.solution = Solution()
        self.inout = [
            ([1, 2, 3, 4], [24, 12, 8, 6]),
            ([4, 5, 1, 8, 2], [80, 64, 320, 40, 160]),
        ]

    def test_productExceptSelf(self):
        # Each fixture runs as its own subtest so one failure does not mask the rest.
        for given, expected in self.inout:
            with self.subTest(input=given, expected=expected):
                actual = self.solution.productExceptSelf(given)
                self.assertEqual(actual, expected)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "9e34fcec3af746af37cb68fd8617c706cc1066f6",
"index": 1743,
"step-1": "<mask token>\n\n\nclass testsolution(TestCase):\n\n def setUp(self):\n self.solution = Solution()\n self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,\n 64, 320, 40, 160])]\n\n def test_productExceptSelf(self):\n for p1, p2 in self.inout:\n with self.subTest(input=p1, expected=p2):\n self.assertEqual(self.solution.productExceptSelf(p1), p2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\nclass testsolution(TestCase):\n\n def setUp(self):\n self.solution = Solution()\n self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,\n 64, 320, 40, 160])]\n\n def test_productExceptSelf(self):\n for p1, p2 in self.inout:\n with self.subTest(input=p1, expected=p2):\n self.assertEqual(self.solution.productExceptSelf(p1), p2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums):\n right, rs = 1, [1] * len(nums)\n for i in range(1, len(nums)):\n rs[i] = nums[i - 1] * rs[i - 1]\n for i in range(len(nums) - 1, -1, -1):\n rs[i], right = rs[i] * right, right * nums[i]\n return rs\n\n\nclass testsolution(TestCase):\n\n def setUp(self):\n self.solution = Solution()\n self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,\n 64, 320, 40, 160])]\n\n def test_productExceptSelf(self):\n for p1, p2 in self.inout:\n with self.subTest(input=p1, expected=p2):\n self.assertEqual(self.solution.productExceptSelf(p1), p2)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums):\n right, rs = 1, [1] * len(nums)\n for i in range(1, len(nums)):\n rs[i] = nums[i - 1] * rs[i - 1]\n for i in range(len(nums) - 1, -1, -1):\n rs[i], right = rs[i] * right, right * nums[i]\n return rs\n\n\nclass testsolution(TestCase):\n\n def setUp(self):\n self.solution = Solution()\n self.inout = [([1, 2, 3, 4], [24, 12, 8, 6]), ([4, 5, 1, 8, 2], [80,\n 64, 320, 40, 160])]\n\n def test_productExceptSelf(self):\n for p1, p2 in self.inout:\n with self.subTest(input=p1, expected=p2):\n self.assertEqual(self.solution.productExceptSelf(p1), p2)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from unittest import TestCase, main\n\nclass Solution:\n def productExceptSelf(self, nums):\n right, rs = 1, [1]*len(nums)\n for i in range(1,len(nums)): rs[i] = nums[i-1]*rs[i-1]\n for i in range(len(nums)-1, -1, -1): rs[i], right = rs[i]*right, right*nums[i]\n return rs\n\nclass testsolution(TestCase):\n def setUp(self):\n self.solution = Solution()\n self.inout = [\n ([1,2,3,4], [24,12,8,6]),\n ([4,5,1,8,2], [80,64,320,40,160])\n ]\n def test_productExceptSelf(self):\n for p1, p2 in self.inout:\n with self.subTest(input=p1, expected=p2):\n self.assertEqual(self.solution.productExceptSelf(p1), p2)\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
def plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(
160), openning_direction='loc_x', rotation_interval=math.radians(22.5),
max_samples=100, min_dist_between_sampled_contact_points=0.005,
contact_offset=0.002):
"""
:param objcm:
:param hnd_s:
:param angle_between_contact_normals:
:param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
:param rotation_granularity:
:param max_samples:
:param min_dist_between_sampled_contact_points:
:param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces
:return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
"""
contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,
min_dist_between_sampled_contact_points=
min_dist_between_sampled_contact_points,
angle_between_contact_normals=angle_between_contact_normals)
grasp_info_list = []
import modeling.geometric_model as gm
for i, cp in enumerate(contact_pairs):
print(f'{i} of {len(contact_pairs)} done!')
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1
) + contact_offset * 2
if jaw_width > hnd_s.jawwidth_rng[1]:
continue
if openning_direction == 'loc_x':
jaw_center_x = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
elif openning_direction == 'loc_y':
jaw_center_y = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
else:
raise ValueError('Openning direction must be loc_x or loc_y!')
grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,
gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,
gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,
toggle_flip=True)
return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=
'preannotated_grasps.pickle', append=False):
if root is None:
root = './'
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=
file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name=
'preannotated_grasps.pickle'):
if root is None:
root = './'
return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plan_contact_pairs(objcm, max_samples=100,
min_dist_between_sampled_contact_points=0.005,
angle_between_contact_normals=math.radians(160), toggle_sampled_points=
False):
"""
find the contact pairs using rayshooting
the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint
:param angle_between_contact_normals:
:param toggle_sampled_points
:return: [[contact_p0, contact_p1], ...]
author: weiwei
date: 20190805, 20210504
"""
contact_points, face_ids = objcm.sample_surface(nsample=max_samples,
radius=min_dist_between_sampled_contact_points / 2)
contact_normals = objcm.objtrm.face_normals[face_ids]
contact_pairs = []
tree = cKDTree(contact_points)
near_history = np.array([0] * len(contact_points), dtype=bool)
for i, contact_p0 in enumerate(contact_points):
if near_history[i]:
continue
contact_n0 = contact_normals[i]
hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 *
0.001, contact_p0 - contact_n0 * 100)
if len(hit_points) > 0:
for contact_p1, contact_n1 in zip(hit_points, hit_normals):
if np.dot(contact_n0, contact_n1) < -math.cos(
angle_between_contact_normals):
near_points_indices = tree.query_ball_point(contact_p1,
min_dist_between_sampled_contact_points)
if len(near_points_indices):
for npi in near_points_indices:
if np.dot(contact_normals[npi], contact_n1
) > math.cos(angle_between_contact_normals):
near_history[npi] = True
contact_pairs.append([[contact_p0, contact_n0], [
contact_p1, contact_n1]])
if toggle_sampled_points:
return contact_pairs, contact_points
return contact_pairs
def plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(
160), openning_direction='loc_x', rotation_interval=math.radians(22.5),
max_samples=100, min_dist_between_sampled_contact_points=0.005,
contact_offset=0.002):
"""
:param objcm:
:param hnd_s:
:param angle_between_contact_normals:
:param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
:param rotation_granularity:
:param max_samples:
:param min_dist_between_sampled_contact_points:
:param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces
:return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
"""
contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,
min_dist_between_sampled_contact_points=
min_dist_between_sampled_contact_points,
angle_between_contact_normals=angle_between_contact_normals)
grasp_info_list = []
import modeling.geometric_model as gm
for i, cp in enumerate(contact_pairs):
print(f'{i} of {len(contact_pairs)} done!')
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1
) + contact_offset * 2
if jaw_width > hnd_s.jawwidth_rng[1]:
continue
if openning_direction == 'loc_x':
jaw_center_x = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
elif openning_direction == 'loc_y':
jaw_center_y = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
else:
raise ValueError('Openning direction must be loc_x or loc_y!')
grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,
gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,
gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,
toggle_flip=True)
return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=
'preannotated_grasps.pickle', append=False):
if root is None:
root = './'
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=
file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name=
'preannotated_grasps.pickle'):
if root is None:
root = './'
return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plan_contact_pairs(objcm, max_samples=100,
min_dist_between_sampled_contact_points=0.005,
angle_between_contact_normals=math.radians(160), toggle_sampled_points=
False):
"""
find the contact pairs using rayshooting
the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint
:param angle_between_contact_normals:
:param toggle_sampled_points
:return: [[contact_p0, contact_p1], ...]
author: weiwei
date: 20190805, 20210504
"""
contact_points, face_ids = objcm.sample_surface(nsample=max_samples,
radius=min_dist_between_sampled_contact_points / 2)
contact_normals = objcm.objtrm.face_normals[face_ids]
contact_pairs = []
tree = cKDTree(contact_points)
near_history = np.array([0] * len(contact_points), dtype=bool)
for i, contact_p0 in enumerate(contact_points):
if near_history[i]:
continue
contact_n0 = contact_normals[i]
hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 *
0.001, contact_p0 - contact_n0 * 100)
if len(hit_points) > 0:
for contact_p1, contact_n1 in zip(hit_points, hit_normals):
if np.dot(contact_n0, contact_n1) < -math.cos(
angle_between_contact_normals):
near_points_indices = tree.query_ball_point(contact_p1,
min_dist_between_sampled_contact_points)
if len(near_points_indices):
for npi in near_points_indices:
if np.dot(contact_normals[npi], contact_n1
) > math.cos(angle_between_contact_normals):
near_history[npi] = True
contact_pairs.append([[contact_p0, contact_n0], [
contact_p1, contact_n1]])
if toggle_sampled_points:
return contact_pairs, contact_points
return contact_pairs
def plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(
160), openning_direction='loc_x', rotation_interval=math.radians(22.5),
max_samples=100, min_dist_between_sampled_contact_points=0.005,
contact_offset=0.002):
"""
:param objcm:
:param hnd_s:
:param angle_between_contact_normals:
:param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
:param rotation_granularity:
:param max_samples:
:param min_dist_between_sampled_contact_points:
:param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces
:return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
"""
contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,
min_dist_between_sampled_contact_points=
min_dist_between_sampled_contact_points,
angle_between_contact_normals=angle_between_contact_normals)
grasp_info_list = []
import modeling.geometric_model as gm
for i, cp in enumerate(contact_pairs):
print(f'{i} of {len(contact_pairs)} done!')
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1
) + contact_offset * 2
if jaw_width > hnd_s.jawwidth_rng[1]:
continue
if openning_direction == 'loc_x':
jaw_center_x = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
elif openning_direction == 'loc_y':
jaw_center_y = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
else:
raise ValueError('Openning direction must be loc_x or loc_y!')
grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,
gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,
gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,
toggle_flip=True)
return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=
'preannotated_grasps.pickle', append=False):
if root is None:
root = './'
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=
file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name=
'preannotated_grasps.pickle'):
if root is None:
root = './'
return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)
if __name__ == '__main__':
import os
import basis
import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag
import modeling.collision_model as cm
import visualization.panda.world as wd
base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])
gripper_s = xag.XArmGripper(enable_cc=True)
objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
objcm = cm.CollisionModel(objpath)
objcm.attach_to(base)
objcm.show_localframe()
grasp_info_list = plan_grasps(gripper_s, objcm,
min_dist_between_sampled_contact_points=0.02)
for grasp_info in grasp_info_list:
(jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,
hnd_rotmat) = grasp_info
gic = gripper_s.copy()
gic.fix_to(hnd_pos, hnd_rotmat)
gic.jaw_to(jaw_width)
print(hnd_pos, hnd_rotmat)
gic.gen_meshmodel().attach_to(base)
base.run()
<|reserved_special_token_1|>
import math
import numpy as np
import basis.robot_math as rm
import grasping.annotation.utils as gu
from scipy.spatial import cKDTree
def plan_contact_pairs(objcm, max_samples=100,
min_dist_between_sampled_contact_points=0.005,
angle_between_contact_normals=math.radians(160), toggle_sampled_points=
False):
"""
find the contact pairs using rayshooting
the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint
:param angle_between_contact_normals:
:param toggle_sampled_points
:return: [[contact_p0, contact_p1], ...]
author: weiwei
date: 20190805, 20210504
"""
contact_points, face_ids = objcm.sample_surface(nsample=max_samples,
radius=min_dist_between_sampled_contact_points / 2)
contact_normals = objcm.objtrm.face_normals[face_ids]
contact_pairs = []
tree = cKDTree(contact_points)
near_history = np.array([0] * len(contact_points), dtype=bool)
for i, contact_p0 in enumerate(contact_points):
if near_history[i]:
continue
contact_n0 = contact_normals[i]
hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 *
0.001, contact_p0 - contact_n0 * 100)
if len(hit_points) > 0:
for contact_p1, contact_n1 in zip(hit_points, hit_normals):
if np.dot(contact_n0, contact_n1) < -math.cos(
angle_between_contact_normals):
near_points_indices = tree.query_ball_point(contact_p1,
min_dist_between_sampled_contact_points)
if len(near_points_indices):
for npi in near_points_indices:
if np.dot(contact_normals[npi], contact_n1
) > math.cos(angle_between_contact_normals):
near_history[npi] = True
contact_pairs.append([[contact_p0, contact_n0], [
contact_p1, contact_n1]])
if toggle_sampled_points:
return contact_pairs, contact_points
return contact_pairs
def plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(
160), openning_direction='loc_x', rotation_interval=math.radians(22.5),
max_samples=100, min_dist_between_sampled_contact_points=0.005,
contact_offset=0.002):
"""
:param objcm:
:param hnd_s:
:param angle_between_contact_normals:
:param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
:param rotation_granularity:
:param max_samples:
:param min_dist_between_sampled_contact_points:
:param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces
:return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
"""
contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,
min_dist_between_sampled_contact_points=
min_dist_between_sampled_contact_points,
angle_between_contact_normals=angle_between_contact_normals)
grasp_info_list = []
import modeling.geometric_model as gm
for i, cp in enumerate(contact_pairs):
print(f'{i} of {len(contact_pairs)} done!')
contact_p0, contact_n0 = cp[0]
contact_p1, contact_n1 = cp[1]
contact_center = (contact_p0 + contact_p1) / 2
jaw_width = np.linalg.norm(contact_p0 - contact_p1
) + contact_offset * 2
if jaw_width > hnd_s.jawwidth_rng[1]:
continue
if openning_direction == 'loc_x':
jaw_center_x = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
elif openning_direction == 'loc_y':
jaw_center_y = contact_n0
jaw_center_z = rm.orthogonal_vector(contact_n0)
else:
raise ValueError('Openning direction must be loc_x or loc_y!')
grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,
gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,
gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,
gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,
toggle_flip=True)
return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=
'preannotated_grasps.pickle', append=False):
if root is None:
root = './'
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=
file_name, append=append)
def load_pickle_file(objcm_name, root=None, file_name=
'preannotated_grasps.pickle'):
if root is None:
root = './'
return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)
if __name__ == '__main__':
import os
import basis
import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag
import modeling.collision_model as cm
import visualization.panda.world as wd
base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])
gripper_s = xag.XArmGripper(enable_cc=True)
objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
objcm = cm.CollisionModel(objpath)
objcm.attach_to(base)
objcm.show_localframe()
grasp_info_list = plan_grasps(gripper_s, objcm,
min_dist_between_sampled_contact_points=0.02)
for grasp_info in grasp_info_list:
(jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,
hnd_rotmat) = grasp_info
gic = gripper_s.copy()
gic.fix_to(hnd_pos, hnd_rotmat)
gic.jaw_to(jaw_width)
print(hnd_pos, hnd_rotmat)
gic.gen_meshmodel().attach_to(base)
base.run()
<|reserved_special_token_1|>
import math
import numpy as np
import basis.robot_math as rm
import grasping.annotation.utils as gu
from scipy.spatial import cKDTree
def plan_contact_pairs(objcm,
                       max_samples=100,
                       min_dist_between_sampled_contact_points=.005,
                       angle_between_contact_normals=math.radians(160),
                       toggle_sampled_points=False):
    """
    Find candidate contact-point pairs on an object's surface using ray shooting.

    For each sampled surface point a ray is cast into the object along the negated
    surface normal; ray hits passing the normal-angle test are paired with the seed
    point. The number of returned pairs may be smaller than max_samples because of
    the min-distance deduplication below.
    :param objcm: collision model providing sample_surface() and ray_hit()
    :param max_samples: number of surface points to sample as pair seeds
    :param min_dist_between_sampled_contact_points: radius used both for sampling and for deduplication
    :param angle_between_contact_normals: angle threshold applied to seed/hit normals
    :param toggle_sampled_points: if True, also return the raw sampled points
    :return: [[[contact_p0, contact_n0], [contact_p1, contact_n1]], ...]
             (plus the sampled points when toggle_sampled_points is True)
    author: weiwei
    date: 20190805, 20210504
    """
    contact_points, face_ids = objcm.sample_surface(nsample=max_samples,
                                                    radius=min_dist_between_sampled_contact_points / 2)
    # Per-sample outward face normals, looked up from the underlying trimesh.
    contact_normals = objcm.objtrm.face_normals[face_ids]
    contact_pairs = []
    # KD-tree over the samples enables fast "who is near this hit point" queries.
    tree = cKDTree(contact_points)
    near_history = np.array([0] * len(contact_points), dtype=bool)
    for i, contact_p0 in enumerate(contact_points):
        if near_history[i]:  # already marked as near a previous hit -> skip as a seed
            continue
        contact_n0 = contact_normals[i]
        # Cast from just inside the surface (-.001 * n0) far through the object.
        hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * .001, contact_p0 - contact_n0 * 100)
        if len(hit_points) > 0:
            for contact_p1, contact_n1 in zip(hit_points, hit_normals):
                # NOTE(review): with the default 160 deg, -cos(...) ~= +0.94, so this
                # rejects only hits whose normal is nearly parallel to contact_n0;
                # confirm the intended orientation of ray_hit normals.
                if np.dot(contact_n0, contact_n1) < -math.cos(angle_between_contact_normals):
                    # Deduplicate: samples close to this hit with a compatible normal
                    # are marked so they are not used as seeds in later iterations.
                    near_points_indices = tree.query_ball_point(contact_p1, min_dist_between_sampled_contact_points)
                    if len(near_points_indices):
                        for npi in near_points_indices:
                            if np.dot(contact_normals[npi], contact_n1) > math.cos(angle_between_contact_normals):
                                near_history[npi] = True
                    contact_pairs.append([[contact_p0, contact_n0], [contact_p1, contact_n1]])
    if toggle_sampled_points:
        return contact_pairs, contact_points
    return contact_pairs
def plan_grasps(hnd_s,
                objcm,
                angle_between_contact_normals=math.radians(160),
                openning_direction = 'loc_x',
                rotation_interval=math.radians(22.5),
                max_samples=100,
                min_dist_between_sampled_contact_points=.005,
                contact_offset=.002):
    """
    Plan parallel-jaw grasps for an object by pairing antipodal surface contacts.

    :param hnd_s: gripper (hand) instance; hnd_s.jawwidth_rng[1] caps the usable jaw width
    :param objcm: collision model of the object to be grasped
    :param angle_between_contact_normals: threshold forwarded to plan_contact_pairs
    :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types
    :param rotation_interval: angular step when rotating each grasp about the contact normal
    :param max_samples: number of surface samples used for the contact-pair search
    :param min_dist_between_sampled_contact_points: minimum spacing between sampled contacts
    :param contact_offset: offset at the contact to avoid being closely in touch with object surfaces
    :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]
    """
    contact_pairs = plan_contact_pairs(objcm,
                                       max_samples=max_samples,
                                       min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,
                                       angle_between_contact_normals=angle_between_contact_normals)
    grasp_info_list = []
    for i, cp in enumerate(contact_pairs):
        print(f"{i} of {len(contact_pairs)} done!")  # progress report; pair checks can be slow
        contact_p0, contact_n0 = cp[0]
        contact_p1, contact_n1 = cp[1]
        # Jaw center sits midway between the two contacts.
        contact_center = (contact_p0 + contact_p1) / 2
        # Open slightly wider than the contact span so the fingers clear the surface.
        jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2
        if jaw_width > hnd_s.jawwidth_rng[1]:
            continue  # this pair is wider than the gripper can open
    # Build a right-handed jaw frame with the opening axis along contact_n0.
        if openning_direction == 'loc_x':
            jaw_center_x = contact_n0
            jaw_center_z = rm.orthogonal_vector(contact_n0)
            jaw_center_y = np.cross(jaw_center_z, jaw_center_x)
        elif openning_direction == 'loc_y':
            jaw_center_y = contact_n0
            jaw_center_z = rm.orthogonal_vector(contact_n0)
        else:
            raise ValueError("Openning direction must be loc_x or loc_y!")
        # Sweep the grasp around the contact normal (and flip) to multiply candidates.
        grasp_info_list += gu.define_grasp_with_rotation(hnd_s,
                                                         objcm,
                                                         gl_jaw_center_pos=contact_center,
                                                         gl_jaw_center_z=jaw_center_z,
                                                         gl_jaw_center_y=jaw_center_y,
                                                         jaw_width=jaw_width,
                                                         gl_rotation_ax=contact_n0,
                                                         rotation_interval=rotation_interval,
                                                         toggle_flip=True)
    return grasp_info_list
def write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):
    """Persist grasp annotations for *objcm_name* through gu.write_pickle_file.

    A root of None is treated as the current working directory ('./').
    """
    destination = './' if root is None else root
    gu.write_pickle_file(objcm_name,
                         grasp_info_list,
                         root=destination,
                         file_name=file_name,
                         append=append)
def load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):
    """Load previously annotated grasps for *objcm_name* through gu.load_pickle_file.

    A root of None is treated as the current working directory ('./').
    """
    source = './' if root is None else root
    return gu.load_pickle_file(objcm_name, root=source, file_name=file_name)
if __name__ == '__main__':
    # Demo: plan grasps for a block mesh with the xArm gripper and visualize them.
    import os
    import basis
    import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag
    import modeling.collision_model as cm
    import visualization.panda.world as wd
    base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])
    gripper_s = xag.XArmGripper(enable_cc=True)
    # Target mesh shipped with the 'basis' package.
    objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')
    objcm = cm.CollisionModel(objpath)
    objcm.attach_to(base)
    objcm.show_localframe()
    grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)
    # Render one gripper copy per planned grasp at its planned pose and jaw width.
    for grasp_info in grasp_info_list:
        # grasp_info layout: [jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat]
        jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
        gic = gripper_s.copy()
        gic.fix_to(hnd_pos, hnd_rotmat)
        gic.jaw_to(jaw_width)
        print(hnd_pos, hnd_rotmat)
        gic.gen_meshmodel().attach_to(base)
    base.run()  # enters the Panda3D render loop; blocks until the window closes
|
flexible
|
{
"blob_id": "738e6d4d608aa977094420a432cbd8a05ea8a1b5",
"index": 4384,
"step-1": "<mask token>\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef 
write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param 
angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 
'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param 
angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 
'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-4": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm, max_samples=100,\n min_dist_between_sampled_contact_points=0.005,\n angle_between_contact_normals=math.radians(160), toggle_sampled_points=\n False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]:\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * \n 0.001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(\n angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1,\n min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1\n ) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [\n contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s, objcm, angle_between_contact_normals=math.radians(\n 160), openning_direction='loc_x', rotation_interval=math.radians(22.5),\n max_samples=100, 
min_dist_between_sampled_contact_points=0.005,\n contact_offset=0.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm, max_samples=max_samples,\n min_dist_between_sampled_contact_points=\n min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f'{i} of {len(contact_pairs)} done!')\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1\n ) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError('Openning direction must be loc_x or loc_y!')\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s, objcm,\n gl_jaw_center_pos=contact_center, gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y, jaw_width=jaw_width,\n gl_rotation_ax=contact_n0, rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name=\n 'preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n gu.write_pickle_file(objcm_name, grasp_info_list, root=root, 
file_name=\n file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name=\n 'preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n base = wd.World(cam_pos=[0.5, 0.5, 0.3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm,\n min_dist_between_sampled_contact_points=0.02)\n for grasp_info in grasp_info_list:\n (jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos,\n hnd_rotmat) = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-5": "import math\nimport numpy as np\nimport basis.robot_math as rm\nimport grasping.annotation.utils as gu\nfrom scipy.spatial import cKDTree\n\n\ndef plan_contact_pairs(objcm,\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n angle_between_contact_normals=math.radians(160),\n toggle_sampled_points=False):\n \"\"\"\n find the contact pairs using rayshooting\n the finally returned number of contact pairs may be smaller than the given max_samples due to the min_dist constraint\n :param angle_between_contact_normals:\n :param toggle_sampled_points\n :return: [[contact_p0, contact_p1], ...]\n author: weiwei\n date: 20190805, 20210504\n \"\"\"\n contact_points, face_ids = objcm.sample_surface(nsample=max_samples,\n radius=min_dist_between_sampled_contact_points / 2)\n contact_normals = objcm.objtrm.face_normals[face_ids]\n contact_pairs = []\n tree = cKDTree(contact_points)\n near_history = np.array([0] * len(contact_points), dtype=bool)\n for i, contact_p0 in enumerate(contact_points):\n if near_history[i]: # if the point was previous near to some points, ignore\n continue\n contact_n0 = contact_normals[i]\n hit_points, hit_normals = objcm.ray_hit(contact_p0 - contact_n0 * .001, contact_p0 - contact_n0 * 100)\n if len(hit_points) > 0:\n for contact_p1, contact_n1 in zip(hit_points, hit_normals):\n if np.dot(contact_n0, contact_n1) < -math.cos(angle_between_contact_normals):\n near_points_indices = tree.query_ball_point(contact_p1, min_dist_between_sampled_contact_points)\n if len(near_points_indices):\n for npi in near_points_indices:\n if np.dot(contact_normals[npi], contact_n1) > math.cos(angle_between_contact_normals):\n near_history[npi] = True\n contact_pairs.append([[contact_p0, contact_n0], [contact_p1, contact_n1]])\n if toggle_sampled_points:\n return contact_pairs, contact_points\n return contact_pairs\n\n\ndef plan_grasps(hnd_s,\n objcm,\n angle_between_contact_normals=math.radians(160),\n openning_direction = 'loc_x',\n 
rotation_interval=math.radians(22.5),\n max_samples=100,\n min_dist_between_sampled_contact_points=.005,\n contact_offset=.002):\n \"\"\"\n\n :param objcm:\n :param hnd_s:\n :param angle_between_contact_normals:\n :param openning_direction: 'loc_x' or 'loc_y' depending on gripper types\n :param rotation_granularity:\n :param max_samples:\n :param min_dist_between_sampled_contact_points:\n :param contact_offset: offset at the cotnact to avoid being closely in touch with object surfaces\n :return: a list [[jawwidth, gl_jaw_center_pos, pos, rotmat], ...]\n \"\"\"\n contact_pairs = plan_contact_pairs(objcm,\n max_samples=max_samples,\n min_dist_between_sampled_contact_points=min_dist_between_sampled_contact_points,\n angle_between_contact_normals=angle_between_contact_normals)\n grasp_info_list = []\n import modeling.geometric_model as gm\n for i, cp in enumerate(contact_pairs):\n print(f\"{i} of {len(contact_pairs)} done!\")\n contact_p0, contact_n0 = cp[0]\n contact_p1, contact_n1 = cp[1]\n contact_center = (contact_p0 + contact_p1) / 2\n jaw_width = np.linalg.norm(contact_p0 - contact_p1) + contact_offset * 2\n if jaw_width > hnd_s.jawwidth_rng[1]:\n continue\n if openning_direction == 'loc_x':\n jaw_center_x = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n jaw_center_y = np.cross(jaw_center_z, jaw_center_x)\n elif openning_direction == 'loc_y':\n jaw_center_y = contact_n0\n jaw_center_z = rm.orthogonal_vector(contact_n0)\n else:\n raise ValueError(\"Openning direction must be loc_x or loc_y!\")\n grasp_info_list += gu.define_grasp_with_rotation(hnd_s,\n objcm,\n gl_jaw_center_pos=contact_center,\n gl_jaw_center_z=jaw_center_z,\n gl_jaw_center_y=jaw_center_y,\n jaw_width=jaw_width,\n gl_rotation_ax=contact_n0,\n rotation_interval=rotation_interval,\n toggle_flip=True)\n return grasp_info_list\n\n\ndef write_pickle_file(objcm_name, grasp_info_list, root=None, file_name='preannotated_grasps.pickle', append=False):\n if root is None:\n root = './'\n 
gu.write_pickle_file(objcm_name, grasp_info_list, root=root, file_name=file_name, append=append)\n\n\ndef load_pickle_file(objcm_name, root=None, file_name='preannotated_grasps.pickle'):\n if root is None:\n root = './'\n return gu.load_pickle_file(objcm_name, root=root, file_name=file_name)\n\n\nif __name__ == '__main__':\n import os\n import basis\n import robot_sim.end_effectors.grippers.xarm_gripper.xarm_gripper as xag\n import modeling.collision_model as cm\n import visualization.panda.world as wd\n\n base = wd.World(cam_pos=[.5, .5, .3], lookat_pos=[0, 0, 0])\n gripper_s = xag.XArmGripper(enable_cc=True)\n objpath = os.path.join(basis.__path__[0], 'objects', 'block.stl')\n objcm = cm.CollisionModel(objpath)\n objcm.attach_to(base)\n objcm.show_localframe()\n grasp_info_list = plan_grasps(gripper_s, objcm, min_dist_between_sampled_contact_points=.02)\n for grasp_info in grasp_info_list:\n jaw_width, gl_jaw_center_pos, gl_jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info\n gic = gripper_s.copy()\n gic.fix_to(hnd_pos, hnd_rotmat)\n gic.jaw_to(jaw_width)\n print(hnd_pos, hnd_rotmat)\n gic.gen_meshmodel().attach_to(base)\n base.run()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Import the SDK
import json
import boto3
from botocore.exceptions import ClientError
import uuid
#dbclient = boto3.client('dynamodb')
dbresource = boto3.resource('dynamodb', region_name='eu-west-1')
rekclient = boto3.client('rekognition','eu-west-1')
collection_name = 'swiftarycelebrity'
ScannedFacestable = dbresource.Table('ScannedFaces')
#
# List all images in the bucket
#
response = rekclient.list_faces( CollectionId=collection_name)
Faces =response ['Faces']
#print Faces
for Images in Faces:
lv_FaceId = Images ['FaceId']
lv_ImageId = Images ['ImageId']
lv_ExternalImageId = Images ['ExternalImageId'],
lv_Names = ExternalImageId.split("_")
lv_Firstname = lv_Names[0]
lv_Surname = lv_Names[1]
print ('FaceId %s' % lv_FaceId)
print ('ImageId %s' % lv_ImageId)
print ('ExternalImageId %s' % lv_ExternalImageId)
print ('Infor %s' %json.dumps(Images))
print ('FirstName %s' % lv_FirstName )
print ('SurName %s' % lv_SurName )
#response = ScannedFacestable.put_item(
# Item={
# 'FaceId' : lv_FaceId,
# 'ImageId' : lv_ImageId,
# 'ExternalImageId' : lv_ExternalImageId,
# 'Firstname' : lv_Firstname,
# 'Surname' : lv_Surname ,
# 'Info' : json.dumps(Images)
# }
#)
print("PutItem succeeded:")
#print(json.dumps(response, indent=4, cls=DecimalEncoder))
|
normal
|
{
"blob_id": "6369c692e358c0dfd1193c6e961ecf9b521ea9ba",
"index": 4649,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-3": "<mask token>\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\nrekclient = boto3.client('rekognition', 'eu-west-1')\ncollection_name = 'swiftarycelebrity'\nScannedFacestable = dbresource.Table('ScannedFaces')\nresponse = rekclient.list_faces(CollectionId=collection_name)\nFaces = response['Faces']\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-4": "import json\nimport boto3\nfrom botocore.exceptions import ClientError\nimport uuid\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\nrekclient = boto3.client('rekognition', 'eu-west-1')\ncollection_name = 'swiftarycelebrity'\nScannedFacestable = dbresource.Table('ScannedFaces')\nresponse = rekclient.list_faces(CollectionId=collection_name)\nFaces = response['Faces']\nfor Images in Faces:\n lv_FaceId = Images['FaceId']\n lv_ImageId = Images['ImageId']\n lv_ExternalImageId = Images['ExternalImageId'],\n lv_Names = ExternalImageId.split('_')\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n print('FaceId %s' % lv_FaceId)\n print('ImageId %s' % lv_ImageId)\n print('ExternalImageId %s' % lv_ExternalImageId)\n print('Infor %s' % json.dumps(Images))\n print('FirstName %s' % lv_FirstName)\n print('SurName %s' % lv_SurName)\nprint('PutItem succeeded:')\n",
"step-5": "# Import the SDK\nimport json\nimport boto3\nfrom botocore.exceptions import ClientError\nimport uuid\n#dbclient = boto3.client('dynamodb')\ndbresource = boto3.resource('dynamodb', region_name='eu-west-1')\n\nrekclient = boto3.client('rekognition','eu-west-1')\ncollection_name = 'swiftarycelebrity'\n\nScannedFacestable = dbresource.Table('ScannedFaces')\n\n#\n# List all images in the bucket\n#\n\n\nresponse = rekclient.list_faces( CollectionId=collection_name)\nFaces =response ['Faces']\n#print Faces\n\nfor Images in Faces:\n lv_FaceId = Images ['FaceId']\n lv_ImageId = Images ['ImageId']\n lv_ExternalImageId = Images ['ExternalImageId'],\n lv_Names = ExternalImageId.split(\"_\")\n lv_Firstname = lv_Names[0]\n lv_Surname = lv_Names[1]\n\n print ('FaceId %s' % lv_FaceId)\n print ('ImageId %s' % lv_ImageId)\n print ('ExternalImageId %s' % lv_ExternalImageId)\n print ('Infor %s' %json.dumps(Images)) \n print ('FirstName %s' % lv_FirstName )\n print ('SurName %s' % lv_SurName )\n\n\n #response = ScannedFacestable.put_item(\n # Item={\n # 'FaceId' : lv_FaceId,\n # 'ImageId' : lv_ImageId,\n # 'ExternalImageId' : lv_ExternalImageId,\n # 'Firstname' : lv_Firstname,\n # 'Surname' : lv_Surname ,\n # 'Info' : json.dumps(Images)\n # }\n #)\n\nprint(\"PutItem succeeded:\")\n#print(json.dumps(response, indent=4, cls=DecimalEncoder))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def publish_data_on_redis(data, channel):
redis_client.publish(channel, json.dumps(data))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
redis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=
'pAssw0rd')
def publish_data_on_redis(data, channel):
redis_client.publish(channel, json.dumps(data))
<|reserved_special_token_1|>
import json
import redis
redis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=
'pAssw0rd')
def publish_data_on_redis(data, channel):
redis_client.publish(channel, json.dumps(data))
<|reserved_special_token_1|>
import json
import redis
redis_client = redis.StrictRedis(host="redis", port=6379, db=1, password="pAssw0rd")
def publish_data_on_redis(data, channel):
redis_client.publish(channel, json.dumps(data))
|
flexible
|
{
"blob_id": "d61024ecbd092852fc3396e6919d6d3c8aa554db",
"index": 6178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n",
"step-3": "<mask token>\nredis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=\n 'pAssw0rd')\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n",
"step-4": "import json\nimport redis\nredis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=\n 'pAssw0rd')\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n",
"step-5": "import json\nimport redis\n\nredis_client = redis.StrictRedis(host=\"redis\", port=6379, db=1, password=\"pAssw0rd\")\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(tot):
id, pw = map(str, input().split())
ID_dict[id] = pw
for _ in range(inp):
print(ID_dict[input()])
<|reserved_special_token_1|>
tot, inp = map(int, input().split())
ID_dict = {}
for _ in range(tot):
id, pw = map(str, input().split())
ID_dict[id] = pw
for _ in range(inp):
print(ID_dict[input()])
<|reserved_special_token_1|>
#17219
tot, inp = map(int, input().split())
ID_dict = {}
for _ in range(tot):
id, pw = map(str, input().split())
ID_dict[id] = pw
for _ in range(inp):
print(ID_dict[input()])
|
flexible
|
{
"blob_id": "cf7556034020d88ddb6b71b9f908c905e2f03cdb",
"index": 4076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\nfor _ in range(inp):\n print(ID_dict[input()])\n",
"step-3": "tot, inp = map(int, input().split())\nID_dict = {}\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\nfor _ in range(inp):\n print(ID_dict[input()])\n",
"step-4": "#17219\ntot, inp = map(int, input().split())\nID_dict = {}\n\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\n\nfor _ in range(inp):\n print(ID_dict[input()])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):
<|reserved_special_token_0|>
@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
depth_map, _ = hourglass.transform(image)
return dict(depth_map=depth_map)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
upload_parser.add_argument('image', location='files', type=FileStorage,
required=True)
@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
_, depth_map_img = hourglass.transform(image)
return serve_pil_image(depth_map_img)
@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
depth_map, _ = hourglass.transform(image)
return dict(depth_map=depth_map)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
api = Namespace('nyudepth', description='Models Trained on NYUDepth')
upload_parser = api.parser()
upload_parser.add_argument('image', location='files', type=FileStorage,
required=True)
@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
_, depth_map_img = hourglass.transform(image)
return serve_pil_image(depth_map_img)
@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
depth_map, _ = hourglass.transform(image)
return dict(depth_map=depth_map)
<|reserved_special_token_1|>
from PIL import Image
from flask_restplus import Namespace, Resource
from werkzeug.datastructures import FileStorage
from core.models.depthinthewild import DepthInTheWild
from core.utils import serve_pil_image
api = Namespace('nyudepth', description='Models Trained on NYUDepth')
upload_parser = api.parser()
upload_parser.add_argument('image', location='files', type=FileStorage,
required=True)
@api.route('/depthinthewild/transform')
@api.expect(upload_parser)
class DepthInTheWildDepthTransform(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
_, depth_map_img = hourglass.transform(image)
return serve_pil_image(depth_map_img)
@api.route('/depthinthewild/transform_raw')
@api.expect(upload_parser)
class DepthInTheWildDepthTransformRaw(Resource):
def post(self):
args = upload_parser.parse_args()
uploaded_file = args['image']
image = Image.open(uploaded_file.stream)
hourglass = DepthInTheWild()
depth_map, _ = hourglass.transform(image)
return dict(depth_map=depth_map)
|
flexible
|
{
"blob_id": "acf409f2e56cd16b7dc07476b49b9c18675f7775",
"index": 5540,
"step-1": "<mask token>\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n <mask token>\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n depth_map, _ = hourglass.transform(image)\n return dict(depth_map=depth_map)\n",
"step-2": "<mask token>\nupload_parser.add_argument('image', location='files', type=FileStorage,\n required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n _, depth_map_img = hourglass.transform(image)\n return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n depth_map, _ = hourglass.transform(image)\n return dict(depth_map=depth_map)\n",
"step-3": "<mask token>\napi = Namespace('nyudepth', description='Models Trained on NYUDepth')\nupload_parser = api.parser()\nupload_parser.add_argument('image', location='files', type=FileStorage,\n required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n _, depth_map_img = hourglass.transform(image)\n return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n depth_map, _ = hourglass.transform(image)\n return dict(depth_map=depth_map)\n",
"step-4": "from PIL import Image\nfrom flask_restplus import Namespace, Resource\nfrom werkzeug.datastructures import FileStorage\nfrom core.models.depthinthewild import DepthInTheWild\nfrom core.utils import serve_pil_image\napi = Namespace('nyudepth', description='Models Trained on NYUDepth')\nupload_parser = api.parser()\nupload_parser.add_argument('image', location='files', type=FileStorage,\n required=True)\n\n\n@api.route('/depthinthewild/transform')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransform(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n _, depth_map_img = hourglass.transform(image)\n return serve_pil_image(depth_map_img)\n\n\n@api.route('/depthinthewild/transform_raw')\n@api.expect(upload_parser)\nclass DepthInTheWildDepthTransformRaw(Resource):\n\n def post(self):\n args = upload_parser.parse_args()\n uploaded_file = args['image']\n image = Image.open(uploaded_file.stream)\n hourglass = DepthInTheWild()\n depth_map, _ = hourglass.transform(image)\n return dict(depth_map=depth_map)\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
<|reserved_special_token_0|>
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,
bdict):
owu = owus / utts
lineout1 = [language, corpus, child, utts, owu]
ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
tokencount = sum(pdict.values())
lineout1.append(tokencount)
typecount = len(ordered)
lineout1.append(typecount)
ttr = typecount / tokencount
lineout1.append(ttr)
boundarydist = []
diphonedist = []
k = 0
diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
'_' + corpus + '_' + child + '_' + str(lcount) +
'utterances_diphone-system.txt')
with io.open(diphfile, 'w', encoding='utf8') as writefile:
writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n')
for diph, denom in ordered:
k += 1
if bdict[diph]:
num = bdict[diph]
else:
num = 0
boundprob = num / denom
boundarydist.append(boundprob)
relfreq = denom / tokencount
diphonedist.append(relfreq)
writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph,
relfreq, boundprob))
writefile.close()
boundaryH = entropy(boundarydist, qk=None, base=2)
lineout1.append(boundaryH)
diphoneH = entropy(diphonedist, qk=None, base=2)
lineout1.append(diphoneH)
tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
cmd1 = 'rm ' + tmplnre
os.system(cmd1)
cmd2 = 'Rscript lnre.R ' + diphfile
os.system(cmd2)
if os.path.exists(tmplnre):
with open(tmplnre, 'r') as lnre:
for line in lnre:
lineout1.append(line.rstrip())
lnre.close()
else:
lineout1.append(0)
lineout1.append(0)
lineout1.append(0)
return lineout1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,
bdict):
owu = owus / utts
lineout1 = [language, corpus, child, utts, owu]
ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
tokencount = sum(pdict.values())
lineout1.append(tokencount)
typecount = len(ordered)
lineout1.append(typecount)
ttr = typecount / tokencount
lineout1.append(ttr)
boundarydist = []
diphonedist = []
k = 0
diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
'_' + corpus + '_' + child + '_' + str(lcount) +
'utterances_diphone-system.txt')
with io.open(diphfile, 'w', encoding='utf8') as writefile:
writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n')
for diph, denom in ordered:
k += 1
if bdict[diph]:
num = bdict[diph]
else:
num = 0
boundprob = num / denom
boundarydist.append(boundprob)
relfreq = denom / tokencount
diphonedist.append(relfreq)
writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph,
relfreq, boundprob))
writefile.close()
boundaryH = entropy(boundarydist, qk=None, base=2)
lineout1.append(boundaryH)
diphoneH = entropy(diphonedist, qk=None, base=2)
lineout1.append(diphoneH)
tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
cmd1 = 'rm ' + tmplnre
os.system(cmd1)
cmd2 = 'Rscript lnre.R ' + diphfile
os.system(cmd2)
if os.path.exists(tmplnre):
with open(tmplnre, 'r') as lnre:
for line in lnre:
lineout1.append(line.rstrip())
lnre.close()
else:
lineout1.append(0)
lineout1.append(0)
lineout1.append(0)
return lineout1
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,
    wcount):
    """Segment `text` with one wordseg algorithm and score it against gold.

    Args:
        lcount: utterance count, used only to build output file names.
        text: phonemized corpus text (';eword' marks word boundaries).
        algo: algorithm key ('dibs', 'puddle', 'tp_*', baselines, 'oracle').
        lineout1: stats row from process_corpus(); copied, never mutated.
        language, corpus, child: identifiers used in the file names.
        pcount: total phone count.
        wcount: total word count.

    Returns:
        A copy of lineout1 extended with word/phone counts, the algorithm
        name, and the scores parsed from the wordseg-eval output.
    """
    lineout2 = deepcopy(lineout1)  # never mutate the caller's row
    meanlength = round(pcount / wcount, 6)  # mean phones per word
    pboundary = round(wcount / pcount, 6)  # P(boundary) per phone
    lineout2.append(wcount)
    lineout2.append(pcount)
    lineout2.append(meanlength)
    lineout2.append(pboundary)
    # Build the output paths from one shared prefix instead of repeating it.
    base = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
        '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_')
    tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
    goldfile = base + 'gold-for-wordseg.txt'
    prepfile = base + 'prepared-for-wordseg.txt'
    segfile = base + 'segmented-by_' + algo + '.txt'
    evalfile = base + 'segmented-by_' + algo + '_eval.txt'
    # Dump the corpus so far for the wordseg CLI tools; `with` guarantees the
    # handle is closed even if the write fails.
    with open(tmpfile, 'w') as tmp:
        tmp.write(text)
    # Prepare gold and input files (punctuation ignored).
    os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %
        (tmpfile, goldfile, prepfile))
    lineout2.append(algo)
    if algo == 'dibs':
        # DIBS-phrasal trains on whole utterances (tmpfile) as chunks.
        os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,
            algo, tmpfile, segfile))
    elif algo == 'utt_baseline':
        os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
    elif algo == 'rand_baseline':
        os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
    elif algo == 'unit_baseline':
        os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
    elif algo == 'oracle':
        # Oracle baseline posts boundaries with the true boundary probability.
        os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,
            pboundary, segfile))
    elif algo == 'tp_ftp':
        os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_btp':
        os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_mi':
        os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,
            segfile))
    else:
        os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
    os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
    # Each eval line is "<label> <value>"; keep from the first digit onwards.
    with open(evalfile, 'r') as evalhandle:  # renamed: don't shadow eval()
        for line in evalhandle:
            lineout2.append(re.sub(r'^[^\d]*', '', line.rstrip()))
    print(lineout2)
    return lineout2
<|reserved_special_token_0|>
# CSV header row: the corpus-level columns written by process_corpus()
# followed by the per-algorithm columns appended by word_seg().
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances',
    'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',
    'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',
    'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',
    'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',
    'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',
    'boundary.noedge.R', 'boundary.noedge.F'))
<|reserved_special_token_0|>
# Main driver: stream each phonemized corpus file, accumulate cumulative
# diphone and boundary counts, and emit one block of stats rows each time
# the utterance count ends in "000" (and once more at end of file).
for filein in glob.glob(directory + '*_phonemes.txt', recursive=True):
    print(filein)
    # File names look like <language>_<corpus>_<child>_..._phonemes.txt.
    language, corpus, child = filein.split('/')[-1].split('_')[0:3]
    phondict = collections.Counter()  # diphone -> token frequency
    boundaries = collections.Counter()  # diphone -> word-boundary frequency
    phonecount = 0
    wordcount = 0
    with io.open(filein, 'r', encoding='utf8') as myfile:
        linecount = 0
        owucount = 0
        inputsofar = ''
        for line in myfile:
            inputsofar += line
            linecount += 1
            # ';eword' tokens delimit words in the phonemized input.
            ewords = line.count(';eword')
            wordcount += ewords
            if ewords == 1:
                owucount += 1
            phones = line.split()
            nphones = len(phones) - ewords
            phonecount += nphones
            for i, phone in enumerate(phones):
                # Skip the first phone of an utterance or word, and the
                # ';eword' delimiters themselves.
                if i == 0 or phones[i] == ';eword' or phones[i - 1
                    ] == ';eword':
                    pass
                else:
                    diphone = phones[i - 1] + phones[i]
                    phondict[diphone] += 1
                    # NOTE(review): phones[i + 1] assumes every line ends
                    # with ';eword'; a line without that trailing delimiter
                    # would raise IndexError here -- confirm input format.
                    if i == 1 or phones[i + 1] == ';eword' or phones[i - 2
                        ] == ';eword':
                        boundaries[diphone] += 1
            # Iteration point: utterance count is a round multiple of 1000.
            if thousand.search(str(linecount)):
                csvline1 = process_corpus(linecount, inputsofar, language,
                    corpus, child, linecount, owucount, phondict, boundaries)
                for a in algos:
                    csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                        language, corpus, child, phonecount, wordcount)
                    statscsv.writerow(csvline2)
        # End of file: run once more unless the last line already triggered.
        if not thousand.search(str(linecount)):
            csvline1 = process_corpus(linecount, inputsofar, language,
                corpus, child, linecount, owucount, phondict, boundaries)
            for a in algos:
                csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                    language, corpus, child, phonecount, wordcount)
                statscsv.writerow(csvline2)
    myfile.close()  # redundant: the `with` block already closed the file
print('FINISHED')
print('see ' + statsfile)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
uname = getpass.getuser()  # login name, used to build /Users/<uname>/ paths
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,
    bdict):
    """Compute corpus-level statistics for the utterances seen so far.

    Args:
        lcount: number of utterances consumed so far (used in file names).
        text: raw corpus text up to this point (unused here; kept for
            signature parity with word_seg).
        language, corpus, child: identifiers parsed from the input file name.
        utts: utterance count (callers pass the same value as lcount).
        owus: number of one-word utterances.
        pdict: diphone -> token frequency (a collections.Counter in callers).
        bdict: diphone -> word-boundary frequency.

    Returns:
        [language, corpus, child, utts, prop.owus, tokens, types, TTR,
        boundary entropy, diphone entropy] plus three values read from the
        Zipf LNRE fit (or three zeros when the R script produced no output).
    """
    owu = owus / utts  # proportion of one-word utterances
    lineout1 = [language, corpus, child, utts, owu]
    # Rank diphones by token frequency (descending) for the LNRE input file.
    ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
    tokencount = sum(pdict.values())
    lineout1.append(tokencount)
    typecount = len(ordered)
    lineout1.append(typecount)
    ttr = typecount / tokencount  # type/token ratio
    lineout1.append(ttr)
    boundarydist = []
    diphonedist = []
    diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
        '_' + corpus + '_' + child + '_' + str(lcount) +
        'utterances_diphone-system.txt')
    with io.open(diphfile, 'w', encoding='utf8') as writefile:
        # Only columns 1-3 (k, f, type) are consumed by lnre.R.
        writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n')
        for k, (diph, denom) in enumerate(ordered, start=1):
            # .get() instead of `if bdict[diph]`: same result for a Counter,
            # but no KeyError if a plain dict is ever passed in.
            num = bdict.get(diph, 0)
            boundprob = num / denom  # P(word boundary | diphone)
            boundarydist.append(boundprob)
            relfreq = denom / tokencount  # P(diphone)
            diphonedist.append(relfreq)
            writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph,
                relfreq, boundprob))
    # Shannon entropies (bits); scipy normalizes each distribution to sum 1.
    boundaryH = entropy(boundarydist, qk=None, base=2)
    lineout1.append(boundaryH)
    diphoneH = entropy(diphonedist, qk=None, base=2)
    lineout1.append(diphoneH)
    # Zipf-Mandelbrot LNRE fit via R; lnre.R writes its results to tmplnre.
    # Remove any stale output first so a failed run cannot leave us reading
    # the previous iteration's values.
    tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
    if os.path.exists(tmplnre):
        # os.remove instead of os.system('rm ...'): no shell spawn, and no
        # stderr noise when the file does not exist.
        os.remove(tmplnre)
    os.system('Rscript lnre.R ' + diphfile)
    if os.path.exists(tmplnre):
        with open(tmplnre, 'r') as lnre:
            for line in lnre:
                lineout1.append(line.rstrip())
    else:
        # Fit failed or R unavailable: pad zm.alpha / zm.X2 / zm.p with zeros.
        lineout1.extend([0, 0, 0])
    return lineout1
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,
    wcount):
    """Segment `text` with one wordseg algorithm and score it against gold.

    Args:
        lcount: utterance count, used only to build output file names.
        text: phonemized corpus text (';eword' marks word boundaries).
        algo: algorithm key ('dibs', 'puddle', 'tp_*', baselines, 'oracle').
        lineout1: stats row from process_corpus(); copied, never mutated.
        language, corpus, child: identifiers used in the file names.
        pcount: total phone count.
        wcount: total word count.

    Returns:
        A copy of lineout1 extended with word/phone counts, the algorithm
        name, and the scores parsed from the wordseg-eval output.
    """
    lineout2 = deepcopy(lineout1)  # never mutate the caller's row
    meanlength = round(pcount / wcount, 6)  # mean phones per word
    pboundary = round(wcount / pcount, 6)  # P(boundary) per phone
    lineout2.append(wcount)
    lineout2.append(pcount)
    lineout2.append(meanlength)
    lineout2.append(pboundary)
    # Build the output paths from one shared prefix instead of repeating it.
    base = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
        '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_')
    tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
    goldfile = base + 'gold-for-wordseg.txt'
    prepfile = base + 'prepared-for-wordseg.txt'
    segfile = base + 'segmented-by_' + algo + '.txt'
    evalfile = base + 'segmented-by_' + algo + '_eval.txt'
    # Dump the corpus so far for the wordseg CLI tools; `with` guarantees the
    # handle is closed even if the write fails.
    with open(tmpfile, 'w') as tmp:
        tmp.write(text)
    # Prepare gold and input files (punctuation ignored).
    os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %
        (tmpfile, goldfile, prepfile))
    lineout2.append(algo)
    if algo == 'dibs':
        # DIBS-phrasal trains on whole utterances (tmpfile) as chunks.
        os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,
            algo, tmpfile, segfile))
    elif algo == 'utt_baseline':
        os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
    elif algo == 'rand_baseline':
        os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
    elif algo == 'unit_baseline':
        os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
    elif algo == 'oracle':
        # Oracle baseline posts boundaries with the true boundary probability.
        os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,
            pboundary, segfile))
    elif algo == 'tp_ftp':
        os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_btp':
        os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_mi':
        os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,
            segfile))
    else:
        os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
    os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
    # Each eval line is "<label> <value>"; keep from the first digit onwards.
    with open(evalfile, 'r') as evalhandle:  # renamed: don't shadow eval()
        for line in evalhandle:
            lineout2.append(re.sub(r'^[^\d]*', '', line.rstrip()))
    print(lineout2)
    return lineout2
# Open the results CSV and write its header row.
statsfile = ('/Users/' + uname +
    '/Corpora/CHILDES/segmentation_experiment_stats.csv')
statsopen = open(statsfile, 'wt')
statscsv = csv.writer(statsopen)
# Header: corpus-level columns first, then the per-algorithm wordseg scores.
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances',
    'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',
    'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',
    'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',
    'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',
    'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',
    'boundary.noedge.R', 'boundary.noedge.F'))
# Utterance counts ending in "000" trigger an iteration point.
thousand = re.compile('000$')
# Segmentation algorithms to run at every iteration point.
algos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',
    'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']
# Input directory containing the phonemized corpus files.
directory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'
# Main driver: stream each phonemized corpus file, accumulate cumulative
# diphone and boundary counts, and emit one block of stats rows each time
# the utterance count ends in "000" (and once more at end of file).
for filein in glob.glob(directory + '*_phonemes.txt', recursive=True):
    print(filein)
    # File names look like <language>_<corpus>_<child>_..._phonemes.txt.
    language, corpus, child = filein.split('/')[-1].split('_')[0:3]
    phondict = collections.Counter()  # diphone -> token frequency
    boundaries = collections.Counter()  # diphone -> word-boundary frequency
    phonecount = 0
    wordcount = 0
    with io.open(filein, 'r', encoding='utf8') as myfile:
        linecount = 0
        owucount = 0
        inputsofar = ''
        for line in myfile:
            inputsofar += line
            linecount += 1
            # ';eword' tokens delimit words in the phonemized input.
            ewords = line.count(';eword')
            wordcount += ewords
            if ewords == 1:
                owucount += 1
            phones = line.split()
            nphones = len(phones) - ewords
            phonecount += nphones
            for i, phone in enumerate(phones):
                # Skip the first phone of an utterance or word, and the
                # ';eword' delimiters themselves.
                if i == 0 or phones[i] == ';eword' or phones[i - 1
                    ] == ';eword':
                    pass
                else:
                    diphone = phones[i - 1] + phones[i]
                    phondict[diphone] += 1
                    # NOTE(review): phones[i + 1] assumes every line ends
                    # with ';eword'; a line without that trailing delimiter
                    # would raise IndexError here -- confirm input format.
                    if i == 1 or phones[i + 1] == ';eword' or phones[i - 2
                        ] == ';eword':
                        boundaries[diphone] += 1
            # Iteration point: utterance count is a round multiple of 1000.
            if thousand.search(str(linecount)):
                csvline1 = process_corpus(linecount, inputsofar, language,
                    corpus, child, linecount, owucount, phondict, boundaries)
                for a in algos:
                    csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                        language, corpus, child, phonecount, wordcount)
                    statscsv.writerow(csvline2)
        # End of file: run once more unless the last line already triggered.
        if not thousand.search(str(linecount)):
            csvline1 = process_corpus(linecount, inputsofar, language,
                corpus, child, linecount, owucount, phondict, boundaries)
            for a in algos:
                csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                    language, corpus, child, phonecount, wordcount)
                statscsv.writerow(csvline2)
    myfile.close()  # redundant: the `with` block already closed the file
print('FINISHED')
print('see ' + statsfile)
<|reserved_special_token_1|>
from __future__ import division
import io, collections, os, glob, csv, re
from scipy.stats import entropy
from copy import deepcopy
import getpass
uname = getpass.getuser()  # login name, used to build /Users/<uname>/ paths
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,
    bdict):
    """Compute corpus-level statistics for the utterances seen so far.

    Args:
        lcount: number of utterances consumed so far (used in file names).
        text: raw corpus text up to this point (unused here; kept for
            signature parity with word_seg).
        language, corpus, child: identifiers parsed from the input file name.
        utts: utterance count (callers pass the same value as lcount).
        owus: number of one-word utterances.
        pdict: diphone -> token frequency (a collections.Counter in callers).
        bdict: diphone -> word-boundary frequency.

    Returns:
        [language, corpus, child, utts, prop.owus, tokens, types, TTR,
        boundary entropy, diphone entropy] plus three values read from the
        Zipf LNRE fit (or three zeros when the R script produced no output).
    """
    owu = owus / utts  # proportion of one-word utterances
    lineout1 = [language, corpus, child, utts, owu]
    # Rank diphones by token frequency (descending) for the LNRE input file.
    ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
    tokencount = sum(pdict.values())
    lineout1.append(tokencount)
    typecount = len(ordered)
    lineout1.append(typecount)
    ttr = typecount / tokencount  # type/token ratio
    lineout1.append(ttr)
    boundarydist = []
    diphonedist = []
    diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
        '_' + corpus + '_' + child + '_' + str(lcount) +
        'utterances_diphone-system.txt')
    with io.open(diphfile, 'w', encoding='utf8') as writefile:
        # Only columns 1-3 (k, f, type) are consumed by lnre.R.
        writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n')
        for k, (diph, denom) in enumerate(ordered, start=1):
            # .get() instead of `if bdict[diph]`: same result for a Counter,
            # but no KeyError if a plain dict is ever passed in.
            num = bdict.get(diph, 0)
            boundprob = num / denom  # P(word boundary | diphone)
            boundarydist.append(boundprob)
            relfreq = denom / tokencount  # P(diphone)
            diphonedist.append(relfreq)
            writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph,
                relfreq, boundprob))
    # Shannon entropies (bits); scipy normalizes each distribution to sum 1.
    boundaryH = entropy(boundarydist, qk=None, base=2)
    lineout1.append(boundaryH)
    diphoneH = entropy(diphonedist, qk=None, base=2)
    lineout1.append(diphoneH)
    # Zipf-Mandelbrot LNRE fit via R; lnre.R writes its results to tmplnre.
    # Remove any stale output first so a failed run cannot leave us reading
    # the previous iteration's values.
    tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
    if os.path.exists(tmplnre):
        # os.remove instead of os.system('rm ...'): no shell spawn, and no
        # stderr noise when the file does not exist.
        os.remove(tmplnre)
    os.system('Rscript lnre.R ' + diphfile)
    if os.path.exists(tmplnre):
        with open(tmplnre, 'r') as lnre:
            for line in lnre:
                lineout1.append(line.rstrip())
    else:
        # Fit failed or R unavailable: pad zm.alpha / zm.X2 / zm.p with zeros.
        lineout1.extend([0, 0, 0])
    return lineout1
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,
    wcount):
    """Segment `text` with one wordseg algorithm and score it against gold.

    Args:
        lcount: utterance count, used only to build output file names.
        text: phonemized corpus text (';eword' marks word boundaries).
        algo: algorithm key ('dibs', 'puddle', 'tp_*', baselines, 'oracle').
        lineout1: stats row from process_corpus(); copied, never mutated.
        language, corpus, child: identifiers used in the file names.
        pcount: total phone count.
        wcount: total word count.

    Returns:
        A copy of lineout1 extended with word/phone counts, the algorithm
        name, and the scores parsed from the wordseg-eval output.
    """
    lineout2 = deepcopy(lineout1)  # never mutate the caller's row
    meanlength = round(pcount / wcount, 6)  # mean phones per word
    pboundary = round(wcount / pcount, 6)  # P(boundary) per phone
    lineout2.append(wcount)
    lineout2.append(pcount)
    lineout2.append(meanlength)
    lineout2.append(pboundary)
    # Build the output paths from one shared prefix instead of repeating it.
    base = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +
        '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_')
    tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
    goldfile = base + 'gold-for-wordseg.txt'
    prepfile = base + 'prepared-for-wordseg.txt'
    segfile = base + 'segmented-by_' + algo + '.txt'
    evalfile = base + 'segmented-by_' + algo + '_eval.txt'
    # Dump the corpus so far for the wordseg CLI tools; `with` guarantees the
    # handle is closed even if the write fails.
    with open(tmpfile, 'w') as tmp:
        tmp.write(text)
    # Prepare gold and input files (punctuation ignored).
    os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %
        (tmpfile, goldfile, prepfile))
    lineout2.append(algo)
    if algo == 'dibs':
        # DIBS-phrasal trains on whole utterances (tmpfile) as chunks.
        os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,
            algo, tmpfile, segfile))
    elif algo == 'utt_baseline':
        os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
    elif algo == 'rand_baseline':
        os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
    elif algo == 'unit_baseline':
        os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
    elif algo == 'oracle':
        # Oracle baseline posts boundaries with the true boundary probability.
        os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,
            pboundary, segfile))
    elif algo == 'tp_ftp':
        os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_btp':
        os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,
            segfile))
    elif algo == 'tp_mi':
        os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,
            segfile))
    else:
        os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
    os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
    # Each eval line is "<label> <value>"; keep from the first digit onwards.
    with open(evalfile, 'r') as evalhandle:  # renamed: don't shadow eval()
        for line in evalhandle:
            lineout2.append(re.sub(r'^[^\d]*', '', line.rstrip()))
    print(lineout2)
    return lineout2
# Open the results CSV and write its header row.
statsfile = ('/Users/' + uname +
    '/Corpora/CHILDES/segmentation_experiment_stats.csv')
statsopen = open(statsfile, 'wt')
statscsv = csv.writer(statsopen)
# Header: corpus-level columns first, then the per-algorithm wordseg scores.
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances',
    'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',
    'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',
    'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',
    'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',
    'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',
    'boundary.noedge.R', 'boundary.noedge.F'))
# Utterance counts ending in "000" trigger an iteration point.
thousand = re.compile('000$')
# Segmentation algorithms to run at every iteration point.
algos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',
    'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']
# Input directory containing the phonemized corpus files.
directory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'
# Main driver: stream each phonemized corpus file, accumulate cumulative
# diphone and boundary counts, and emit one block of stats rows each time
# the utterance count ends in "000" (and once more at end of file).
for filein in glob.glob(directory + '*_phonemes.txt', recursive=True):
    print(filein)
    # File names look like <language>_<corpus>_<child>_..._phonemes.txt.
    language, corpus, child = filein.split('/')[-1].split('_')[0:3]
    phondict = collections.Counter()  # diphone -> token frequency
    boundaries = collections.Counter()  # diphone -> word-boundary frequency
    phonecount = 0
    wordcount = 0
    with io.open(filein, 'r', encoding='utf8') as myfile:
        linecount = 0
        owucount = 0
        inputsofar = ''
        for line in myfile:
            inputsofar += line
            linecount += 1
            # ';eword' tokens delimit words in the phonemized input.
            ewords = line.count(';eword')
            wordcount += ewords
            if ewords == 1:
                owucount += 1
            phones = line.split()
            nphones = len(phones) - ewords
            phonecount += nphones
            for i, phone in enumerate(phones):
                # Skip the first phone of an utterance or word, and the
                # ';eword' delimiters themselves.
                if i == 0 or phones[i] == ';eword' or phones[i - 1
                    ] == ';eword':
                    pass
                else:
                    diphone = phones[i - 1] + phones[i]
                    phondict[diphone] += 1
                    # NOTE(review): phones[i + 1] assumes every line ends
                    # with ';eword'; a line without that trailing delimiter
                    # would raise IndexError here -- confirm input format.
                    if i == 1 or phones[i + 1] == ';eword' or phones[i - 2
                        ] == ';eword':
                        boundaries[diphone] += 1
            # Iteration point: utterance count is a round multiple of 1000.
            if thousand.search(str(linecount)):
                csvline1 = process_corpus(linecount, inputsofar, language,
                    corpus, child, linecount, owucount, phondict, boundaries)
                for a in algos:
                    csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                        language, corpus, child, phonecount, wordcount)
                    statscsv.writerow(csvline2)
        # End of file: run once more unless the last line already triggered.
        if not thousand.search(str(linecount)):
            csvline1 = process_corpus(linecount, inputsofar, language,
                corpus, child, linecount, owucount, phondict, boundaries)
            for a in algos:
                csvline2 = word_seg(linecount, inputsofar, a, csvline1,
                    language, corpus, child, phonecount, wordcount)
                statscsv.writerow(csvline2)
    myfile.close()  # redundant: the `with` block already closed the file
print('FINISHED')
print('see ' + statsfile)
<|reserved_special_token_1|>
## n.b. uses python 3 wordseg virtualenv (wordseg needs Py3)
# e.g. $ source ~/venvs/Py3/wordseg/bin/activate
## wordseg: see https://wordseg.readthedocs.io
from __future__ import division
import io, collections, os, glob, csv, re
from scipy.stats import entropy
from copy import deepcopy
# get username
import getpass
uname = getpass.getuser()  # login name, used to build /Users/<uname>/ paths
## get corpus stats
def process_corpus(lcount, text, language, corpus, child, utts, owus, pdict, bdict):
    """Compute corpus-level statistics for the utterances seen so far.

    Args:
        lcount: number of utterances consumed so far (used in file names).
        text: raw corpus text up to this point (unused here; kept for
            signature parity with word_seg).
        language, corpus, child: identifiers parsed from the input file name.
        utts: utterance count (callers pass the same value as lcount).
        owus: number of one-word utterances.
        pdict: diphone -> token frequency (a collections.Counter in callers).
        bdict: diphone -> word-boundary frequency.

    Returns:
        [language, corpus, child, utts, prop.owus, tokens, types, TTR,
        boundary entropy, diphone entropy] plus three values read from the
        Zipf LNRE fit (or three zeros when the R script produced no output).
    """
    owu = owus / utts  # proportion of one-word utterances
    lineout1 = [language, corpus, child, utts, owu]
    # corpus types, tokens: rank diphones by frequency for the LNRE input
    ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)
    tokencount = sum(pdict.values())
    lineout1.append(tokencount)
    typecount = len(ordered)
    lineout1.append(typecount)
    ttr = typecount / tokencount  # type/token ratio
    lineout1.append(ttr)
    # diphone distributions
    boundarydist = []
    diphonedist = []
    diphfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_diphone-system.txt'
    with io.open(diphfile, 'w', encoding='utf8') as writefile:
        writefile.write('k\tf\ttype\trel.freq\tboundary.prob\n') # only columns 1-3 are used by lnre.R
        for k, (diph, denom) in enumerate(ordered, start=1):
            # .get() instead of `if bdict[diph]`: same result for a Counter,
            # but no KeyError if a plain dict is ever passed in.
            num = bdict.get(diph, 0)
            boundprob = num / denom # boundary prob
            boundarydist.append(boundprob)
            relfreq = denom / tokencount # diphone prob
            diphonedist.append(relfreq)
            writefile.write('%i\t%i\t%s\t%.6f\t%.6f\n' % (k, denom, diph, relfreq, boundprob))
    # entropy calcs (bits); scipy normalizes each distribution to sum 1
    boundaryH = entropy(boundarydist, qk=None, base=2)
    lineout1.append(boundaryH)
    diphoneH = entropy(diphonedist, qk=None, base=2)
    lineout1.append(diphoneH)
    # run Zipf LNRE fit; clear any stale output first so a failed run cannot
    # leave us reading the previous iteration's values
    tmplnre = '/Users/' + uname + '/tmp/lnre.txt'
    if os.path.exists(tmplnre):
        # os.remove instead of os.system('rm ...'): no shell spawn, and no
        # stderr noise when the file does not exist
        os.remove(tmplnre)
    os.system('Rscript lnre.R ' + diphfile)
    if os.path.exists(tmplnre):
        with open(tmplnre, 'r') as lnre:
            for line in lnre:
                lineout1.append(line.rstrip())
    else: # fit failed or R unavailable: pad zm.alpha/zm.X2/zm.p with zeros
        lineout1.extend([0, 0, 0])
    return lineout1
## run wordseg
def word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount, wcount):
    """Segment `text` with one wordseg algorithm and score it against gold.

    Args:
        lcount: utterance count, used only to build output file names.
        text: phonemized corpus text (';eword' marks word boundaries).
        algo: algorithm key ('dibs', 'puddle', 'tp_*', baselines, 'oracle').
        lineout1: stats row from process_corpus(); copied, never mutated.
        language, corpus, child: identifiers used in the file names.
        pcount: total phone count.
        wcount: total word count.

    Returns:
        A copy of lineout1 extended with word/phone counts, the algorithm
        name, and the scores parsed from the wordseg-eval output.
    """
    # start point is output of process_corpus(); copy so caller's row is safe
    lineout2 = deepcopy(lineout1)
    meanlength = round(pcount/wcount, 6) # phones per word
    pboundary = round(wcount/pcount, 6) # words per phone
    lineout2.append(wcount)
    lineout2.append(pcount)
    lineout2.append(meanlength)
    lineout2.append(pboundary)
    # prepare filenames from one shared prefix instead of repeating it
    base = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_'
    tmpfile = '/Users/' + uname + '/tmp/tmp.txt'
    goldfile = base + 'gold-for-wordseg.txt'
    prepfile = base + 'prepared-for-wordseg.txt'
    segfile = base + 'segmented-by_' + algo + '.txt'
    evalfile = base + 'segmented-by_' + algo + '_eval.txt'
    # write text so far to temporary file; `with` guarantees the handle is
    # closed even if the write fails
    with open(tmpfile, 'w') as tmp:
        tmp.write(text)
    # prepare gold and input files for wordseg (punctuation ignored)
    os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' % (tmpfile, goldfile, prepfile))
    lineout2.append(algo)
    # run wordseg command
    if algo=='dibs': # DIBS-phrasal uses phrases (utterances) as chunks
        os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile, algo, tmpfile, segfile))
    elif algo=='utt_baseline': # utterance baseline
        os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))
    elif algo=='rand_baseline': # random baseline
        os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))
    elif algo=='unit_baseline': # basic unit baseline
        os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))
    elif algo=='oracle': # oracle baseline: true P(boundary | phone)
        os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile, pboundary, segfile))
    elif algo=='tp_ftp': # transitional prob: forwards
        os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile, segfile))
    elif algo=='tp_btp': # transitional prob: backwards
        os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile, segfile))
    elif algo=='tp_mi': # transitional prob: mutual information
        os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile, segfile))
    else:
        os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))
    # evaluate; each eval line is "<label> <value>" -- keep from first digit on
    os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))
    with open(evalfile, 'r') as evalhandle: # renamed: don't shadow eval()
        for line in evalhandle:
            lineout2.append(re.sub(r'^[^\d]*', '', line.rstrip()))
    print(lineout2)
    return lineout2
## open results file
statsfile = '/Users/' + uname + '/Corpora/CHILDES/segmentation_experiment_stats.csv'
statsopen = open(statsfile,'wt')
statscsv = csv.writer(statsopen)
# header: corpus-level columns first, then the per-algorithm wordseg scores
statscsv.writerow(('language', 'corpus', 'child', 'n.utterances', 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy', 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones', 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR', 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P', 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P', 'boundary.noedge.R', 'boundary.noedge.F'))
## input directory (the phonemized files)
thousand = re.compile('000$')  # utterance counts ending in 000 trigger output
algos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle', 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']
directory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'
# Main driver: stream each phonemized corpus file, accumulate cumulative
# diphone and boundary counts, and emit one block of stats rows at every
# iteration point (utterance count ending in 000) and once at end of file.
for filein in glob.glob(directory+'*_phonemes.txt', recursive=True):
    print(filein)
    # parse filename: <language>_<corpus>_<child>_..._phonemes.txt
    (language, corpus, child) = filein.split('/')[-1].split('_')[0:3]
    # read corpus
    phondict = collections.Counter()  # diphone -> token frequency
    boundaries = collections.Counter()  # diphone -> word-boundary frequency
    phonecount = 0
    wordcount = 0
    with io.open(filein, 'r', encoding='utf8') as myfile:
        linecount = 0
        owucount = 0
        inputsofar = ''
        for line in myfile:
            inputsofar += line
            linecount += 1
            # ';eword' tokens delimit words in the phonemized input
            ewords = line.count(';eword')
            wordcount += ewords
            if ewords==1:
                owucount += 1
                #print('utterance: %s' % (line.rstrip()))
            phones = line.split() # split on whitespace
            nphones = len(phones) - ewords
            phonecount += nphones
            for (i, phone) in enumerate(phones):
                if i==0 or phones[i]==';eword' or phones[i-1]==';eword':
                    pass # ignore phone 1 in utterance or word and word delimiters
                else:
                    diphone = phones[i-1] + phones[i]
                    phondict[diphone] += 1
                    # NOTE(review): phones[i+1] assumes every line ends with
                    # ';eword'; a line without that trailing delimiter would
                    # raise IndexError here -- confirm input format.
                    if i==1 or phones[i+1]==';eword' or phones[i-2]==';eword':
                        #print('boundary diphone: %s' % (diphone))
                        boundaries[diphone] += 1
                        #print('count: %i' % (boundaries[diphone]))
            # reached iteration point? (round 1000)
            if thousand.search(str(linecount)):
                csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
                for a in algos:
                    csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
                    statscsv.writerow((csvline2))
        # run again at end of file, if not round 1000 line count
        if not thousand.search(str(linecount)):
            csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)
            for a in algos:
                csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)
                statscsv.writerow((csvline2))
    myfile.close()  # redundant: the `with` block already closed the file
print('FINISHED')
print('see '+ statsfile)
|
flexible
|
{
"blob_id": "4ba0affd3cbdc2652274213a8d410b541fb3edb4",
"index": 4584,
"step-1": "<mask token>\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\n goldfile 
= ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_btp':\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n 
eval.close()\n print(lineout2)\n return lineout2\n\n\n<mask token>\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\n<mask token>\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, 
wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-3": "<mask token>\nuname = getpass.getuser()\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + 
'/tmp/tmp.txt'\n goldfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_btp':\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n 
lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n eval.close()\n print(lineout2)\n return lineout2\n\n\nstatsfile = ('/Users/' + uname +\n '/Corpora/CHILDES/segmentation_experiment_stats.csv')\nstatsopen = open(statsfile, 'wt')\nstatscsv = csv.writer(statsopen)\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\nthousand = re.compile('000$')\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',\n 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = 
word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-4": "from __future__ import division\nimport io, collections, os, glob, csv, re\nfrom scipy.stats import entropy\nfrom copy import deepcopy\nimport getpass\nuname = getpass.getuser()\n\n\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict,\n bdict):\n owu = owus / utts\n lineout1 = [language, corpus, child, utts, owu]\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\n tokencount = sum(pdict.values())\n lineout1.append(tokencount)\n typecount = len(ordered)\n lineout1.append(typecount)\n ttr = typecount / tokencount\n lineout1.append(ttr)\n boundarydist = []\n diphonedist = []\n k = 0\n diphfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_diphone-system.txt')\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n')\n for diph, denom in ordered:\n k += 1\n if bdict[diph]:\n num = bdict[diph]\n else:\n num = 0\n boundprob = num / denom\n boundarydist.append(boundprob)\n relfreq = denom / tokencount\n diphonedist.append(relfreq)\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph,\n relfreq, boundprob))\n writefile.close()\n boundaryH = entropy(boundarydist, qk=None, base=2)\n lineout1.append(boundaryH)\n diphoneH = entropy(diphonedist, qk=None, base=2)\n lineout1.append(diphoneH)\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\n cmd1 = 'rm ' + tmplnre\n os.system(cmd1)\n cmd2 = 'Rscript lnre.R ' + diphfile\n os.system(cmd2)\n if os.path.exists(tmplnre):\n with open(tmplnre, 'r') as lnre:\n for line in lnre:\n lineout1.append(line.rstrip())\n lnre.close()\n else:\n lineout1.append(0)\n lineout1.append(0)\n lineout1.append(0)\n return lineout1\n\n\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount,\n wcount):\n lineout2 = deepcopy(lineout1)\n meanlength = round(pcount / wcount, 6)\n pboundary = round(wcount / pcount, 6)\n 
lineout2.append(wcount)\n lineout2.append(pcount)\n lineout2.append(meanlength)\n lineout2.append(pboundary)\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\n goldfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_gold-for-wordseg.txt')\n prepfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_prepared-for-wordseg.txt')\n segfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '.txt')\n evalfile = ('/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language +\n '_' + corpus + '_' + child + '_' + str(lcount) +\n 'utterances_segmented-by_' + algo + '_eval.txt')\n tmp = open(tmpfile, 'w')\n tmp.write(text)\n tmp.close()\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' %\n (tmpfile, goldfile, prepfile))\n lineout2.append(algo)\n if algo == 'dibs':\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % (prepfile,\n algo, tmpfile, segfile))\n elif algo == 'utt_baseline':\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\n elif algo == 'rand_baseline':\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile)\n )\n elif algo == 'unit_baseline':\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\n elif algo == 'oracle':\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile,\n pboundary, segfile))\n elif algo == 'tp_ftp':\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_btp':\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile,\n segfile))\n elif algo == 'tp_mi':\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile,\n segfile))\n else:\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\n os.system('cat %s | 
wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\n with open(evalfile, 'r') as eval:\n for line in eval:\n lineout2.append(re.sub('^[^\\\\d]*', '', line.rstrip()))\n eval.close()\n print(lineout2)\n return lineout2\n\n\nstatsfile = ('/Users/' + uname +\n '/Corpora/CHILDES/segmentation_experiment_stats.csv')\nstatsopen = open(statsfile, 'wt')\nstatscsv = csv.writer(statsopen)\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances',\n 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy',\n 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones',\n 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR',\n 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P',\n 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P',\n 'boundary.noedge.R', 'boundary.noedge.F'))\nthousand = re.compile('000$')\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle',\n 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\nfor filein in glob.glob(directory + '*_phonemes.txt', recursive=True):\n print(filein)\n language, corpus, child = filein.split('/')[-1].split('_')[0:3]\n phondict = collections.Counter()\n boundaries = collections.Counter()\n phonecount = 0\n wordcount = 0\n with io.open(filein, 'r', encoding='utf8') as myfile:\n linecount = 0\n owucount = 0\n inputsofar = ''\n for line in myfile:\n inputsofar += line\n linecount += 1\n ewords = line.count(';eword')\n wordcount += ewords\n if ewords == 1:\n owucount += 1\n phones = line.split()\n nphones = len(phones) - ewords\n phonecount += nphones\n for i, phone in enumerate(phones):\n if i == 0 or phones[i] == ';eword' or phones[i - 1\n ] == ';eword':\n pass\n else:\n diphone = phones[i - 1] + phones[i]\n phondict[diphone] += 1\n if i == 1 or phones[i + 1] == ';eword' or phones[i - 2\n ] == ';eword':\n boundaries[diphone] += 1\n if thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, 
inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n if not thousand.search(str(linecount)):\n csvline1 = process_corpus(linecount, inputsofar, language,\n corpus, child, linecount, owucount, phondict, boundaries)\n for a in algos:\n csvline2 = word_seg(linecount, inputsofar, a, csvline1,\n language, corpus, child, phonecount, wordcount)\n statscsv.writerow(csvline2)\n myfile.close()\nprint('FINISHED')\nprint('see ' + statsfile)\n",
"step-5": "## n.b. uses python 3 wordseg virtualenv (wordseg needs Py3)\r\n# e.g. $ source ~/venvs/Py3/wordseg/bin/activate\r\n\r\n## wordseg: see https://wordseg.readthedocs.io\r\nfrom __future__ import division\r\nimport io, collections, os, glob, csv, re\r\nfrom scipy.stats import entropy\r\nfrom copy import deepcopy\r\n\r\n# get username\r\nimport getpass\r\nuname = getpass.getuser()\r\n\r\n## get corpus stats\r\ndef process_corpus(lcount, text, language, corpus, child, utts, owus, pdict, bdict):\r\n owu = owus/utts\r\n lineout1 = [language, corpus, child, utts, owu]\r\n # corpus types, tokens\r\n ordered = sorted(pdict.items(), key=lambda pair: pair[1], reverse=True)\r\n tokencount = sum(pdict.values())\r\n lineout1.append(tokencount)\r\n typecount = len(ordered)\r\n lineout1.append(typecount)\r\n ttr = typecount / tokencount\r\n lineout1.append(ttr)\r\n # diphone distributions\r\n boundarydist = []\r\n diphonedist = []\r\n k=0\r\n diphfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_diphone-system.txt'\r\n with io.open(diphfile, 'w', encoding='utf8') as writefile:\r\n writefile.write('k\\tf\\ttype\\trel.freq\\tboundary.prob\\n') # only columns 1-3 are used by lnre.R\r\n for diph, denom in ordered:\r\n k+=1\r\n if bdict[diph]:\r\n num = bdict[diph]\r\n else:\r\n num = 0\r\n boundprob = num / denom # boundary prob\r\n boundarydist.append(boundprob)\r\n relfreq = denom / tokencount # diphone prob\r\n diphonedist.append(relfreq)\r\n writefile.write('%i\\t%i\\t%s\\t%.6f\\t%.6f\\n' % (k, denom, diph, relfreq, boundprob))\r\n writefile.close()\r\n # entropy calcs\r\n boundaryH = entropy(boundarydist, qk=None, base=2)\r\n lineout1.append(boundaryH)\r\n diphoneH = entropy(diphonedist, qk=None, base=2)\r\n lineout1.append(diphoneH)\r\n # run Zipf LNRE fit (clear old file first)\r\n tmplnre = '/Users/' + uname + '/tmp/lnre.txt'\r\n cmd1 = 'rm '+ tmplnre\r\n os.system(cmd1)\r\n cmd2 = 
'Rscript lnre.R '+ diphfile\r\n os.system(cmd2)\r\n if os.path.exists(tmplnre):\r\n with open(tmplnre, 'r') as lnre:\r\n for line in lnre:\r\n lineout1.append(line.rstrip())\r\n lnre.close()\r\n else: # else 3 zeros\r\n lineout1.append(0)\r\n lineout1.append(0)\r\n lineout1.append(0)\r\n # get C_WALS stat (not in use)\r\n #langcode = langcodes[lang]\r\n return lineout1\r\n\r\n## run wordseg\r\ndef word_seg(lcount, text, algo, lineout1, language, corpus, child, pcount, wcount):\r\n # start point is output of process_corpus()\r\n lineout2 = deepcopy(lineout1)\r\n meanlength = round(pcount/wcount, 6) # phones per word\r\n pboundary = round(wcount/pcount, 6) # words per phone\r\n lineout2.append(wcount)\r\n lineout2.append(pcount)\r\n lineout2.append(meanlength)\r\n lineout2.append(pboundary)\r\n # prepare filenames\r\n tmpfile = '/Users/' + uname + '/tmp/tmp.txt'\r\n goldfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_gold-for-wordseg.txt'\r\n prepfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_prepared-for-wordseg.txt'\r\n segfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '.txt'\r\n evalfile = '/Users/' + uname + '/Corpora/CHILDES/wordseg/' + language + '_' + corpus + '_' + child + '_' + str(lcount) + 'utterances_segmented-by_' + algo + '_eval.txt'\r\n # write text so far to temporary file\r\n tmp = open(tmpfile, 'w')\r\n tmp.write(text)\r\n tmp.close()\r\n # prepare gold and input files for wordseg\r\n os.system('cat %s | wordseg-prep -u phone --punctuation --gold %s > %s' % (tmpfile, goldfile, prepfile)) # ignore punctuation\r\n lineout2.append(algo)\r\n # run wordseg command\r\n if algo=='dibs': # DIBS-phrasal uses phrases (utterances) as chunks\r\n os.system('cat %s | wordseg-%s -t phrasal %s > %s' % 
(prepfile, algo, tmpfile, segfile))\r\n elif algo=='utt_baseline': # utterance baseline\r\n os.system('cat %s | wordseg-baseline -P 0 > %s' % (prepfile, segfile))\r\n elif algo=='rand_baseline': # random baseline\r\n os.system('cat %s | wordseg-baseline -P 0.5 > %s' % (prepfile, segfile))\r\n elif algo=='unit_baseline': # basic unit baseline\r\n os.system('cat %s | wordseg-baseline -P 1 > %s' % (prepfile, segfile))\r\n elif algo=='oracle': # oracle baseline: P(word|phone)\r\n os.system('cat %s | wordseg-baseline -P %.6f > %s' % (prepfile, pboundary, segfile))\r\n elif algo=='tp_ftp': # transitional prob: forwards\r\n os.system('cat %s | wordseg-tp -d ftp -t absolute > %s' % (prepfile, segfile))\r\n elif algo=='tp_btp': # transitional prob: forwards\r\n os.system('cat %s | wordseg-tp -d btp -t absolute > %s' % (prepfile, segfile))\r\n elif algo=='tp_mi': # transitional prob: mutual information\r\n os.system('cat %s | wordseg-tp -d mi -t absolute > %s' % (prepfile, segfile))\r\n else:\r\n os.system('cat %s | wordseg-%s > %s' % (prepfile, algo, segfile))\r\n # evaluate\r\n os.system('cat %s | wordseg-eval %s > %s' % (segfile, goldfile, evalfile))\r\n with open(evalfile, 'r') as eval:\r\n for line in eval:\r\n lineout2.append(re.sub('^[^\\d]*', '', line.rstrip())) # strip from the start until first number encountered\r\n eval.close()\r\n print(lineout2)\r\n return lineout2\r\n\r\n## open results file\r\nstatsfile = '/Users/' + uname + '/Corpora/CHILDES/segmentation_experiment_stats.csv'\r\nstatsopen = open(statsfile,'wt')\r\nstatscsv = csv.writer(statsopen)\r\nstatscsv.writerow(('language', 'corpus', 'child', 'n.utterances', 'prop.owus', 'tokens', 'types', 'TTR', 'boundary.entropy', 'diphone.entropy', 'zm.alpha', 'zm.X2', 'zm.p', 'n.words', 'n.phones', 'mean.phones.per.word', 'boundary.prob', 'wordseg', 'typeP', 'typeR', 'typeF', 'tokenP', 'tokenR', 'tokenF', 'boundary.all.P', 'boundary.all.R', 'boundary.all.F', 'boundary.noedge.P', 'boundary.noedge.R', 
'boundary.noedge.F'))\r\n\r\n## input directory (the phonemized files)\r\nthousand = re.compile('000$')\r\nalgos = ['utt_baseline', 'rand_baseline', 'unit_baseline', 'oracle', 'tp_ftp', 'tp_btp', 'tp_mi', 'dibs', 'puddle']\r\ndirectory = '/Users/' + uname + '/Corpora/CHILDES/phonemized/'\r\nfor filein in glob.glob(directory+'*_phonemes.txt', recursive=True):\r\n print(filein)\r\n # parse filename\r\n (language, corpus, child) = filein.split('/')[-1].split('_')[0:3]\r\n # read corpus\r\n phondict = collections.Counter()\r\n boundaries = collections.Counter()\r\n phonecount = 0\r\n wordcount = 0\r\n with io.open(filein, 'r', encoding='utf8') as myfile:\r\n linecount = 0\r\n owucount = 0\r\n inputsofar = ''\r\n for line in myfile:\r\n inputsofar += line\r\n linecount += 1\r\n ewords = line.count(';eword')\r\n wordcount += ewords\r\n if ewords==1:\r\n owucount += 1\r\n #print('utterance: %s' % (line.rstrip()))\r\n phones = line.split() # split on whitespace\r\n nphones = len(phones) - ewords\r\n phonecount += nphones\r\n for (i, phone) in enumerate(phones):\r\n if i==0 or phones[i]==';eword' or phones[i-1]==';eword':\r\n pass # ignore phone 1 in utterance or word and word delimiters\r\n else:\r\n diphone = phones[i-1] + phones[i]\r\n phondict[diphone] += 1\r\n if i==1 or phones[i+1]==';eword' or phones[i-2]==';eword':\r\n #print('boundary diphone: %s' % (diphone))\r\n boundaries[diphone] += 1\r\n #print('count: %i' % (boundaries[diphone]))\r\n # reached iteration point? 
(round 1000)\r\n if thousand.search(str(linecount)):\r\n csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)\r\n for a in algos:\r\n csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)\r\n statscsv.writerow((csvline2))\r\n # run again at end of file, if not round 1000 line count\r\n if not thousand.search(str(linecount)):\r\n csvline1 = process_corpus(linecount, inputsofar, language, corpus, child, linecount, owucount, phondict, boundaries)\r\n for a in algos:\r\n csvline2 = word_seg(linecount, inputsofar, a, csvline1, language, corpus, child, phonecount, wordcount)\r\n statscsv.writerow((csvline2))\r\n myfile.close()\r\n\r\nprint('FINISHED')\r\nprint('see '+ statsfile)\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def random():
    """Draw a plausible random parameter set for the model.

    Radius and distortion factor are sampled log-uniformly; the
    nearest-neighbour distance is derived from the radius via a
    beta-distributed packing fraction (skewed toward near-contact).
    """
    sphere_radius = 10 ** np.random.uniform(1.3, 4)
    distortion = 10 ** np.random.uniform(-2, -0.7)
    packing = np.random.beta(a=10, b=1)
    neighbour_distance = sphere_radius * 4 / np.sqrt(3) / packing
    return {
        'dnn': neighbour_distance,
        'd_factor': distortion,
        'radius': sphere_radius,
    }
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
name = 'bcc_paracrystal'
# Display title; fixed typo "lattic" -> "lattice".
title = 'Body-centred cubic lattice with paracrystalline distortion'
description = """
    Calculates the scattering from a **body-centered cubic lattice** with
    paracrystalline distortion. Thermal vibrations are considered to be
    negligible, and the size of the paracrystal is infinitely large.
    Paracrystalline distortion is assumed to be isotropic and characterized
    by a Gaussian distribution.
    """
category = 'shape:paracrystal'
single = False  # model requires double precision
# Parameter table: [name, units, default, [lower, upper], type, description]
parameters = [
    ['dnn',         'Ang',        220,  [-inf, inf], '',            'Nearest neighbour distance'],
    ['d_factor',    '',          0.06,  [-inf, inf], '',            'Paracrystal distortion factor'],
    ['radius',      'Ang',         40,  [0, inf],    'volume',      'Particle radius'],
    ['sld',         '1e-6/Ang^2',   4,  [-inf, inf], 'sld',         'Particle scattering length density'],
    ['sld_solvent', '1e-6/Ang^2',   1,  [-inf, inf], 'sld',         'Solvent scattering length density'],
    ['theta',       'degrees',     60,  [-360, 360], 'orientation', 'c axis to beam angle'],
    ['phi',         'degrees',     60,  [-360, 360], 'orientation', 'rotation about beam'],
    ['psi',         'degrees',     60,  [-360, 360], 'orientation', 'rotation about c axis'],
]
# C sources compiled for the kernel; helpers first, model kernel last.
source = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',
    'bcc_paracrystal.c']
def random():
    """Sample a random (dnn, d_factor, radius) parameter set for testing."""
    log_radius = np.random.uniform(1.3, 4)
    log_distortion = np.random.uniform(-2, -0.7)
    fraction_of_max = np.random.beta(a=10, b=1)
    radius = 10 ** log_radius
    # nearest-neighbour distance scaled up from the close-packed value
    nearest = radius * 4 / np.sqrt(3) / fraction_of_max
    return dict(dnn=nearest, d_factor=10 ** log_distortion, radius=radius)
# NOTE(review): q appears chosen relative to the default dnn=220 Ang
# (q = 4*pi/dnn, presumably near a lattice reflection) -- confirm intent.
q = 4.0 * pi / 220.0
# Regression tests: [parameter overrides, [q values], [expected I(q) values]]
tests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993,
    0.005367008206852725]]]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from numpy import inf, pi
name = 'bcc_paracrystal'
title = 'Body-centred cubic lattic with paracrystalline distortion'
description = """
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = 'shape:paracrystal'
single = False
parameters = [['dnn', 'Ang', 220, [-inf, inf], '',
'Nearest neighbour distance'], ['d_factor', '', 0.06, [-inf, inf], '',
'Paracrystal distortion factor'], ['radius', 'Ang', 40, [0, inf],
'volume', 'Particle radius'], ['sld', '1e-6/Ang^2', 4, [-inf, inf],
'sld', 'Particle scattering length density'], ['sld_solvent',
'1e-6/Ang^2', 1, [-inf, inf], 'sld',
'Solvent scattering length density'], ['theta', 'degrees', 60, [-360,
360], 'orientation', 'c axis to beam angle'], ['phi', 'degrees', 60, [-
360, 360], 'orientation', 'rotation about beam'], ['psi', 'degrees', 60,
[-360, 360], 'orientation', 'rotation about c axis']]
source = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',
'bcc_paracrystal.c']
def random():
    """Return a random parameter set for the model."""
    # Dict literals evaluate left to right, so the two uniform draws
    # happen in the same order as the original implementation.
    exponents = {
        'radius': np.random.uniform(1.3, 4),
        'd_factor': np.random.uniform(-2, -0.7),
    }
    radius = 10 ** exponents['radius']
    d_factor = 10 ** exponents['d_factor']
    dnn_fraction = np.random.beta(a=10, b=1)
    dnn = radius * 4 / np.sqrt(3) / dnn_fraction
    return {'dnn': dnn, 'd_factor': d_factor, 'radius': radius}
# Reference q value used by the 1D regression test below
# (derived from the default dnn = 220 Ang).
q = 4.0 * pi / 220.0
# Smoke test: three q points checked against stored reference intensities.
tests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, 
    0.005367008206852725]]]
<|reserved_special_token_1|>
r"""
Definition
----------
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be negligible,
and the size of the paracrystal is infinitely large. Paracrystalline distortion
is assumed to be isotropic and characterized by a Gaussian distribution.
The scattering intensity $I(q)$ is calculated as
.. math::
I(q) = \frac{\text{scale}}{V_p} V_\text{lattice} P(q) Z(q) + \text{background}
where *scale* is the volume fraction of crystal in the sample volume,
$V_\text{lattice}$ is the volume fraction of spheres in the crystal, $V_p$ is
the volume of the primary particle, $P(q)$ is the form factor of the sphere
(normalized), and $Z(q)$ is the paracrystalline structure factor for a
body-centered cubic structure.
.. note::
At this point the GUI does not return $V_\text{lattice}$ separately so that
the user will need to calculate it from the equation given and the
appropriate returned parameters.
.. warning::
As per the equations below, this model will return I(q)=0 for all q if the
distortion factor is equal to 0. The model is not meant to support perfect
crystals.
.. figure:: img/bcc_geometry.jpg
Body-centered cubic (BCC) lattice taken from reference [#Matsuoka1987]_.
Following the derivation from reference [#Matsuoka1987]_, as corrected in
reference [#Matsuoka1990]_, and based on the above figure, the
primitive unit cell vectors $\vec{a_1},\vec{a_2}$, and $\vec{a_3}$, which
enclose the smallest possible unit cell for the bcc lattice, are defined below:
.. math::
\vec{a_1} &= \frac{1}{2}(-\vec{b_1} + \vec{b_2} + \vec{b_3}) \\
\vec{a_2} &= \frac{1}{2} (\vec{b_1} - \vec{b_2} + \vec{b_3}) \\
\vec{a_3} &= \frac{1}{2}(\vec{b_1} + \vec{b_2} -\vec{b_3}).
where $\vec{b_1},\vec{b_2}$, and $\vec{b_3}$ are the unit cell vectors of the
conventional unit cell, which is a unit cell that includes the full symmetry
of the lattice. As defined by reference [#Matsuoka1987]_, the constant $a$ is the
lattice parameter of the conventional unit cell with
$|\vec{b_1}|=|\vec{b_2}|=|\vec{b_3}|=a$. Using this definition, the
nearest-neighbor distance ($D$) is given by
$D=|\vec{a_1}|=|\vec{a_2}|=|\vec{a_3}|=\sqrt{(a/2)^2+(a/2)^2+(a/2)^2}=\sqrt{\frac{3a^2}{4}}=\frac{\sqrt{3}a}{2}$.
The volume of the primitive unit cell $V_u$ is then given by:
.. math::
V_u &= |(\vec{a_1}\times \vec{a_2})\cdot\vec{a_3}|\\
&= (\frac{a^2}{2},\frac{a^2}{2},0)\cdot(\frac{a}{2},\frac{a}{2},-\frac{a}{2})\\
&= a^3/2
In this case, the volume fraction ($V_{lattice}$) of spherical particles with
radius $R$ sitting on the bcc lattice is given by:
.. math::
V_{lattice} &= \frac{4/3 \pi R^3}{a^3/2}\\
&= \frac{8\pi R^3}{3a^3}\\
&= \frac{\sqrt{3} \pi R^3}{D^3}
Now, continuing to follow [#Matsuoka1987]_, the structure (lattice)
factor $Z(\vec{q})$ for a 3D paracrystal can be written as:
.. math::
Z(\vec{q}) = \prod_{k=1}^{3}Z_k(\vec{q})
with
.. math::
Z_k(\vec{q}) = \frac{1-|F_k|^2}{1-2|F_k|\cos(\vec{a_k}\cdot\vec{q})+|F_k|^2}
and where $F_k(\vec{q})$ is the structure factor of the primitive unit cell
defined as:
.. math::
F_k(\vec{q}) = e^{-\frac{1}{2} \Delta a^2_k q^2} \times e^{-i\vec{q}\cdot\vec{a_k}}.
Here, $\vec{a_k}$ are the primitive unit cell vectors $\vec{a_1}$, $\vec{a_2}$,
and $\vec{a_3}$. Furthermore, $\Delta a_k$ is the isotropic distortion of the
lattice point from its ideal position and can be defined by a constant factor
$g=\Delta a / |\vec{a_1}| = \Delta a / |\vec{a_2}| = \Delta a / |\vec{a_3}|=\Delta a/D$.
Finally, assuming the definitions presented in this document, the authors of
reference [#Matsuoka1987]_ have derived the lattice factors which are given by:
.. math::
Z_1(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(\sin\theta \cos\phi + \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_2(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(-\sin\theta \cos\phi - \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_3(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qa}{2}(-\sin\theta \cos\phi + \sin\theta \sin\phi - \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Note that Sasview is using the nearest-neighbor parameter ($D$) as an input
instead of the conventional unit cell parameter $a$. In this case, using
$a=\frac{2D}{\sqrt{3}}$, we rewrite $Z_1(q)$, $Z_2(q)$, and $Z_3(q)$ in terms
of $D$ instead of $a$, which leads to:
.. math::
Z_1(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(\sin\theta \cos\phi + \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_2(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(-\sin\theta \cos\phi - \sin\theta \sin\phi + \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Z_3(q,\theta,\phi)&=[1-e^{-q^2\Delta a^2}]/\{1-2e^{-\frac{1}{2}q^2\Delta a^2}\cos[\frac{qD}{\sqrt{3}}(-\sin\theta \cos\phi + \sin\theta \sin\phi - \cos\theta)] + e^{-q^2\Delta a^2}\}\\
Finally note that the position of the Bragg peaks for the bcc lattice are
indexed by (reduced q-values):
.. math::
\frac{qa}{2\pi}=\frac{qD}{\sqrt{3}\pi}=\sqrt{h^2+k^2+l^2}.
In the above equation, we used the conventional unit cell so not all
permutations of h,k, and l will produce Bragg peaks. The Bragg scattering
condition for bcc imposes that h+k+l = even. Thus the peak positions
correspond to (just the first 5)
.. math::
\begin{array}{lccccc}
q/q_o & 1 & \sqrt{2} & \sqrt{3} & \sqrt{4} & \sqrt{5} \\
\text{Indices} & (110) & (200) & (211) & (220) & (310) \\
\end{array}
.. note::
The calculation of $Z(q)$ is a double numerical integral that must be
carried out with a high density of points to properly capture the sharp
peaks of the paracrystalline scattering. So be warned that the calculation
is slow. Fitting of any experimental data must be resolution smeared for
any meaningful fit. This makes a triple integral which may be very slow.
If a double-precision GPU with OpenCL support is available this may improve
the speed of the calculation.
This example dataset is produced using 200 data points,
*qmin* = 0.001 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above default values.
The 2D (Anisotropic model) is based on the reference below where $I(q)$ is
approximated for 1d scattering. Thus the scattering pattern for 2D may not be
accurate, particularly at low $q$. For general details of the calculation and
angular dispersions for oriented particles see :ref:`orientation`. Note that
we are not responsible for any incorrectness of the 2D model computation.
.. figure:: img/parallelepiped_angle_definition.png
Orientation of the crystal with respect to the scattering plane, when
$\theta = \phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).
References
----------
.. [#Matsuoka1987] Hideki Matsuoka et al. *Physical Review B*, 36 (1987)
1754-1765 (Original Paper)
.. [#Matsuoka1990] Hideki Matsuoka et al. *Physical Review B*, 41 (1990)
3854-3856 (Corrections to FCC and BCC lattice structure calculation)
Authorship and Verification
---------------------------
* **Author:** NIST IGOR/DANSE **Date:** pre 2010
* **Last Modified by:** Jonathan Gaudet **Date:** September 26, 2022
* **Last Reviewed by:** Paul Butler **Date:** November 2, 2022
"""
import numpy as np
from numpy import inf, pi
name = "bcc_paracrystal"
title = "Body-centred cubic lattic with paracrystalline distortion"
description = """
Calculates the scattering from a **body-centered cubic lattice** with
paracrystalline distortion. Thermal vibrations are considered to be
negligible, and the size of the paracrystal is infinitely large.
Paracrystalline distortion is assumed to be isotropic and characterized
by a Gaussian distribution.
"""
category = "shape:paracrystal"
#note - calculation requires double precision
single = False
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description" ],
parameters = [["dnn", "Ang", 220, [-inf, inf], "", "Nearest neighbour distance"],
["d_factor", "", 0.06, [-inf, inf], "", "Paracrystal distortion factor"],
["radius", "Ang", 40, [0, inf], "volume", "Particle radius"],
["sld", "1e-6/Ang^2", 4, [-inf, inf], "sld", "Particle scattering length density"],
["sld_solvent", "1e-6/Ang^2", 1, [-inf, inf], "sld", "Solvent scattering length density"],
["theta", "degrees", 60, [-360, 360], "orientation", "c axis to beam angle"],
["phi", "degrees", 60, [-360, 360], "orientation", "rotation about beam"],
["psi", "degrees", 60, [-360, 360], "orientation", "rotation about c axis"]
]
# pylint: enable=bad-whitespace, line-too-long
source = ["lib/sas_3j1x_x.c", "lib/gauss150.c", "lib/sphere_form.c", "bcc_paracrystal.c"]
def random():
    """Return a random parameter set for the model.

    The lattice spacing is tied to the particle radius via a = 4 r/sqrt(3).
    Ordered systems are probably mostly filled, so the packing fraction is
    drawn from a beta(10, 1) distribution: it spans (0, 1] but keeps ~90%
    of samples within ~80% of maximum bcc packing.  Distortion values are
    empirically useful between 0.01 and 0.7, sampled log-uniformly.
    """
    u_radius = np.random.uniform(1.3, 4)
    u_distortion = np.random.uniform(-2, -0.7)   # sigma_d in 0.01-0.7
    fill = np.random.beta(a=10, b=1)
    radius = 10 ** u_radius
    d_factor = 10 ** u_distortion
    dnn = radius * 4 / np.sqrt(3) / fill
    # sld/sld_solvent/scale/background are left at their defaults.
    return {'dnn': dnn, 'd_factor': d_factor, 'radius': radius}
# april 6 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!
# add 2d test later
# October 26, 2022 PDB updated the 1D unit test after fixing the math. The values are again
# assumed correct. It would be good to have an independent assessment. 2D tests remain
# on the todo list
# TODO: fix the 2d tests
# Reference q used by the 1D smoke test (derived from the default dnn = 220 Ang).
q = 4.*pi/220.
# Each entry: [parameter overrides, q values, expected intensities].
tests = [
    [{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, 0.005367008206852725]],
    #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.017, 0.035), 2082.20264399],
    #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.081, 0.011), 0.436323144781],
    ]
|
flexible
|
{
"blob_id": "7ccaa15f025b2c1ba560d07c1a30b06c9ebf9ad1",
"index": 1927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\n<mask token>\n",
"step-3": "<mask token>\nname = 'bcc_paracrystal'\ntitle = 'Body-centred cubic lattic with paracrystalline distortion'\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = 'shape:paracrystal'\nsingle = False\nparameters = [['dnn', 'Ang', 220, [-inf, inf], '',\n 'Nearest neighbour distance'], ['d_factor', '', 0.06, [-inf, inf], '',\n 'Paracrystal distortion factor'], ['radius', 'Ang', 40, [0, inf],\n 'volume', 'Particle radius'], ['sld', '1e-6/Ang^2', 4, [-inf, inf],\n 'sld', 'Particle scattering length density'], ['sld_solvent',\n '1e-6/Ang^2', 1, [-inf, inf], 'sld',\n 'Solvent scattering length density'], ['theta', 'degrees', 60, [-360, \n 360], 'orientation', 'c axis to beam angle'], ['phi', 'degrees', 60, [-\n 360, 360], 'orientation', 'rotation about beam'], ['psi', 'degrees', 60,\n [-360, 360], 'orientation', 'rotation about c axis']]\nsource = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',\n 'bcc_paracrystal.c']\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\nq = 4.0 * pi / 220.0\ntests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, \n 0.005367008206852725]]]\n",
"step-4": "<mask token>\nimport numpy as np\nfrom numpy import inf, pi\nname = 'bcc_paracrystal'\ntitle = 'Body-centred cubic lattic with paracrystalline distortion'\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = 'shape:paracrystal'\nsingle = False\nparameters = [['dnn', 'Ang', 220, [-inf, inf], '',\n 'Nearest neighbour distance'], ['d_factor', '', 0.06, [-inf, inf], '',\n 'Paracrystal distortion factor'], ['radius', 'Ang', 40, [0, inf],\n 'volume', 'Particle radius'], ['sld', '1e-6/Ang^2', 4, [-inf, inf],\n 'sld', 'Particle scattering length density'], ['sld_solvent',\n '1e-6/Ang^2', 1, [-inf, inf], 'sld',\n 'Solvent scattering length density'], ['theta', 'degrees', 60, [-360, \n 360], 'orientation', 'c axis to beam angle'], ['phi', 'degrees', 60, [-\n 360, 360], 'orientation', 'rotation about beam'], ['psi', 'degrees', 60,\n [-360, 360], 'orientation', 'rotation about c axis']]\nsource = ['lib/sas_3j1x_x.c', 'lib/gauss150.c', 'lib/sphere_form.c',\n 'bcc_paracrystal.c']\n\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n radius = 10 ** np.random.uniform(1.3, 4)\n d_factor = 10 ** np.random.uniform(-2, -0.7)\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius * 4 / np.sqrt(3) / dnn_fraction\n pars = dict(dnn=dnn, d_factor=d_factor, radius=radius)\n return pars\n\n\nq = 4.0 * pi / 220.0\ntests = [[{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, \n 0.005367008206852725]]]\n",
"step-5": "r\"\"\"\nDefinition\n----------\n\nCalculates the scattering from a **body-centered cubic lattice** with\nparacrystalline distortion. Thermal vibrations are considered to be negligible,\nand the size of the paracrystal is infinitely large. Paracrystalline distortion\nis assumed to be isotropic and characterized by a Gaussian distribution.\n\nThe scattering intensity $I(q)$ is calculated as\n\n.. math::\n\n I(q) = \\frac{\\text{scale}}{V_p} V_\\text{lattice} P(q) Z(q) + \\text{background}\n\nwhere *scale* is the volume fraction of crystal in the sample volume,\n$V_\\text{lattice}$ is the volume fraction of spheres in the crystal, $V_p$ is\nthe volume of the primary particle, $P(q)$ is the form factor of the sphere\n(normalized), and $Z(q)$ is the paracrystalline structure factor for a\nbody-centered cubic structure.\n\n.. note::\n At this point the GUI does not return $V_\\text{lattice}$ separately so that\n the user will need to calculate it from the equation given and the\n appropriate returned parameters.\n\n.. warning::\n As per the equations below, this model will return I(q)=0 for all q if the\n distortion factor is equal to 0. The model is not meant to support perfect\n crystals.\n\n.. figure:: img/bcc_geometry.jpg\n\n Body-centered cubic (BCC) lattice taken from reference [#Matsuoka1987]_.\n\nFollowing the derivation from reference [#Matsuoka1987]_, as corrected in\nreference [#Matsuoka1990]_, and based on the above figure, the\nprimitive unit cell vectors $\\vec{a_1},\\vec{a_2}$, and $\\vec{a_3}$, which\nenclose the smallest possible unit cell for the bcc lattice, are defined below:\n\n.. 
math::\n \\vec{a_1} &= \\frac{1}{2}(-\\vec{b_1} + \\vec{b_2} + \\vec{b_3}) \\\\\n \\vec{a_2} &= \\frac{1}{2} (\\vec{b_1} - \\vec{b_2} + \\vec{b_3}) \\\\\n \\vec{a_3} &= \\frac{1}{2}(\\vec{b_1} + \\vec{b_2} -\\vec{b_3}).\n\nwhere $\\vec{b_1},\\vec{b_2}$, and $\\vec{b_3}$ are the unit cell vectors of the\nconventional unit cell, which is a unit cell that includes the full symmetry\nof the lattice. As defined by reference [#Matsuoka1987]_, the constant $a$ is the\nlattice parameter of the conventional unit cell with\n$|\\vec{b_1}|=|\\vec{b_2}|=|\\vec{b_3}|=a$. Using this definition, the\nnearest-neighbor distance ($D$) is given by\n$D=|\\vec{a_1}|=|\\vec{a_2}|=|\\vec{a_3}|=\\sqrt{(a/2)^2+(a/2)^2+(a/2)^2}=\\sqrt{\\frac{3a^2}{4}}=\\frac{\\sqrt{3}a}{2}$.\n\nThe volume of the primitive unit cell $V_u$ is then given by:\n\n.. math::\n V_u &= |(\\vec{a_1}\\times \\vec{a_2})\\cdot\\vec{a_3}|\\\\\n &= (\\frac{a^2}{2},\\frac{a^2}{2},0)\\cdot(\\frac{a}{2},\\frac{a}{2},-\\frac{a}{2})\\\\\n &= a^3/2\n\nIn this case, the volume fraction ($V_{lattice}$) of spherical particles with\nradius $R$ sitting on the bcc lattice is given by:\n\n.. math::\n V_{lattice} &= \\frac{4/3 \\pi R^3}{a^3/2}\\\\\n &= \\frac{8\\pi R^3}{3a^3}\\\\\n &= \\frac{\\sqrt{3} \\pi R^3}{D^3}\n\nNow, continuing to follow [#Matsuoka1987]_, the structure (lattice)\nfactor $Z(\\vec{q})$ for a 3D paracrystal can be written as:\n\n.. math::\n Z(\\vec{q}) = \\prod_{k=1}^{3}Z_k(\\vec{q})\n\nwith\n\n.. math::\n Z_k(\\vec{q}) = \\frac{1-|F_k|^2}{1-2|F_k|\\cos(\\vec{a_k}\\cdot\\vec{q})+|F_k|^2}\n\nand where $F_k(\\vec{q})$ is the structure factor of the primitive unit cell\ndefined as:\n\n.. math::\n F_k(\\vec{q}) = e^{-\\frac{1}{2} \\Delta a^2_k q^2} \\times e^{-i\\vec{q}\\cdot\\vec{a_k}}.\n\nHere, $\\vec{a_k}$ are the primitive unit cell vectors $\\vec{a_1}$, $\\vec{a_2}$,\nand $\\vec{a_3}$. 
Furthermore, $\\Delta a_k$ is the isotropic distortion of the\nlattice point from its ideal position and can be defined by a constant factor\n$g=\\Delta a / |\\vec{a_1}| = \\Delta a / |\\vec{a_2}| = \\Delta a / |\\vec{a_3}|=\\Delta a/D$.\n\nFinally, assuming the definitions presented in this document, the authors of\nreference [#Matsuoka1987]_ have derived the lattice factors which are given by:\n\n.. math::\n Z_1(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_2(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(-\\sin\\theta \\cos\\phi - \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_3(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qa}{2}(-\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi - \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n\nNote that Sasview is using the nearest-neighbor parameter ($D$) as an input\ninstead of the conventional unit cell parameter $a$. In this case, using\n$a=\\frac{2D}{\\sqrt{3}}$, we rewrite $Z_1(q)$, $Z_2(q)$, and $Z_3(q)$ in terms\nof $D$ instead of $a$, which leads to:\n\n.. 
math::\n Z_1(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_2(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(-\\sin\\theta \\cos\\phi - \\sin\\theta \\sin\\phi + \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n Z_3(q,\\theta,\\phi)&=[1-e^{-q^2\\Delta a^2}]/\\{1-2e^{-\\frac{1}{2}q^2\\Delta a^2}\\cos[\\frac{qD}{\\sqrt{3}}(-\\sin\\theta \\cos\\phi + \\sin\\theta \\sin\\phi - \\cos\\theta)] + e^{-q^2\\Delta a^2}\\}\\\\\n\nFinally note that the position of the Bragg peaks for the bcc lattice are\nindexed by (reduced q-values):\n\n.. math::\n \\frac{qa}{2\\pi}=\\frac{qD}{\\sqrt{3}\\pi}=\\sqrt{h^2+k^2+l^2}.\n\nIn the above equation, we used the conventional unit cell so not all\npermutations of h,k, and l will produce Bragg peaks. The Bragg scattering\ncondition for bcc imposes that h+k+l = even. Thus the peak positions\ncorrespond to (just the first 5)\n\n.. math::\n\n \\begin{array}{lccccc}\n q/q_o & 1 & \\sqrt{2} & \\sqrt{3} & \\sqrt{4} & \\sqrt{5} \\\\\n \\text{Indices} & (110) & (200) & (211) & (220) & (310) \\\\\n \\end{array}\n\n.. note::\n\n The calculation of $Z(q)$ is a double numerical integral that must be\n carried out with a high density of points to properly capture the sharp\n peaks of the paracrystalline scattering. So be warned that the calculation\n is slow. Fitting of any experimental data must be resolution smeared for\n any meaningful fit. 
This makes a triple integral which may be very slow.\n If a double-precision GPU with OpenCL support is available this may improve\n the speed of the calculation.\n\nThis example dataset is produced using 200 data points,\n*qmin* = 0.001 |Ang^-1|, *qmax* = 0.1 |Ang^-1| and the above default values.\n\nThe 2D (Anisotropic model) is based on the reference below where $I(q)$ is\napproximated for 1d scattering. Thus the scattering pattern for 2D may not be\naccurate, particularly at low $q$. For general details of the calculation and\nangular dispersions for oriented particles see :ref:`orientation`. Note that\nwe are not responsible for any incorrectness of the 2D model computation.\n\n.. figure:: img/parallelepiped_angle_definition.png\n\n Orientation of the crystal with respect to the scattering plane, when\n $\\theta = \\phi = 0$ the $c$ axis is along the beam direction (the $z$ axis).\n\nReferences\n----------\n\n.. [#Matsuoka1987] Hideki Matsuoka et. al. *Physical Review B*, 36 (1987)\n 1754-1765 (Original Paper)\n.. [#Matsuoka1990] Hideki Matsuoka et. al. *Physical Review B*, 41 (1990)\n 3854-3856 (Corrections to FCC and BCC lattice structure calculation)\n\nAuthorship and Verification\n---------------------------\n\n* **Author:** NIST IGOR/DANSE **Date:** pre 2010\n* **Last Modified by:** Jonathan Gaudet **Date:** September 26, 2022\n* **Last Reviewed by:** Paul Butler **Date:** November 2, 2022\n\"\"\"\n\nimport numpy as np\nfrom numpy import inf, pi\n\nname = \"bcc_paracrystal\"\ntitle = \"Body-centred cubic lattic with paracrystalline distortion\"\ndescription = \"\"\"\n Calculates the scattering from a **body-centered cubic lattice** with\n paracrystalline distortion. 
Thermal vibrations are considered to be\n negligible, and the size of the paracrystal is infinitely large.\n Paracrystalline distortion is assumed to be isotropic and characterized\n by a Gaussian distribution.\n \"\"\"\ncategory = \"shape:paracrystal\"\n\n#note - calculation requires double precision\nsingle = False\n\n# pylint: disable=bad-whitespace, line-too-long\n# [\"name\", \"units\", default, [lower, upper], \"type\",\"description\" ],\nparameters = [[\"dnn\", \"Ang\", 220, [-inf, inf], \"\", \"Nearest neighbour distance\"],\n [\"d_factor\", \"\", 0.06, [-inf, inf], \"\", \"Paracrystal distortion factor\"],\n [\"radius\", \"Ang\", 40, [0, inf], \"volume\", \"Particle radius\"],\n [\"sld\", \"1e-6/Ang^2\", 4, [-inf, inf], \"sld\", \"Particle scattering length density\"],\n [\"sld_solvent\", \"1e-6/Ang^2\", 1, [-inf, inf], \"sld\", \"Solvent scattering length density\"],\n [\"theta\", \"degrees\", 60, [-360, 360], \"orientation\", \"c axis to beam angle\"],\n [\"phi\", \"degrees\", 60, [-360, 360], \"orientation\", \"rotation about beam\"],\n [\"psi\", \"degrees\", 60, [-360, 360], \"orientation\", \"rotation about c axis\"]\n ]\n# pylint: enable=bad-whitespace, line-too-long\n\nsource = [\"lib/sas_3j1x_x.c\", \"lib/gauss150.c\", \"lib/sphere_form.c\", \"bcc_paracrystal.c\"]\n\ndef random():\n \"\"\"Return a random parameter set for the model.\"\"\"\n # Define lattice spacing as a multiple of the particle radius\n # using the formula a = 4 r/sqrt(3). Systems which are ordered\n # are probably mostly filled, so use a distribution which goes from\n # zero to one, but leaving 90% of them within 80% of the\n # maximum bcc packing. Lattice distortion values are empirically\n # useful between 0.01 and 0.7. 
Use an exponential distribution\n # in this range 'cuz its easy.\n radius = 10**np.random.uniform(1.3, 4)\n d_factor = 10**np.random.uniform(-2, -0.7) # sigma_d in 0.01-0.7\n dnn_fraction = np.random.beta(a=10, b=1)\n dnn = radius*4/np.sqrt(3)/dnn_fraction\n pars = dict(\n #sld=1, sld_solvent=0, scale=1, background=1e-32,\n dnn=dnn,\n d_factor=d_factor,\n radius=radius,\n )\n return pars\n\n# april 6 2017, rkh add unit tests, NOT compared with any other calc method, assume correct!\n# add 2d test later\n\n# October 26, 2022 PDB updated the 1D unit test after fixing the math. The values are again\n# assumed correct. It would be good to have an independent assessment. 2D tests remain\n# on the todo list\n# TODO: fix the 2d tests\nq = 4.*pi/220.\ntests = [\n [{}, [0.001, q, 0.25], [0.6945817843046642, 1.6885157981411993, 0.005367008206852725]],\n #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.017, 0.035), 2082.20264399],\n #[{'theta': 20.0, 'phi': 30, 'psi': 40.0}, (-0.081, 0.011), 0.436323144781],\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def favorite_book(name):
    """Print a sentence announcing *name* as a favorite book."""
    message = f"One of my favorite books is {name}..."
    print(message)


favorite_book("Alice in Wonderland")
|
normal
|
{
"blob_id": "08848e51d5564bad927607be3fa3c86f2c1212c5",
"index": 9668,
"step-1": "<mask token>\n",
"step-2": "def favorite_book(name):\n print(f'One of my favorite books is {name}...')\n\n\n<mask token>\n",
"step-3": "def favorite_book(name):\n print(f'One of my favorite books is {name}...')\n\n\nfavorite_book('Alice in Wonderland')\n",
"step-4": "def favorite_book(name):\n \n print(f\"One of my favorite books is {name}...\")\n\nfavorite_book(\"Alice in Wonderland\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def start():
    """Log in to Instagram, dismiss the notifications prompt, open the
    explore page and hand control to likeAndComm().

    Uses the module-level ``browser`` webdriver.
    """
    # NOTE(review): credentials are hard-coded placeholders — replace with
    # real values (ideally loaded from the environment) before running.
    username = browser.find_element_by_name('username')
    username.send_keys('Username')
    password = browser.find_element_by_name('password')
    password.send_keys('Password')
    # Submit the login form.
    nextButton = browser.find_element_by_xpath("//button[@type='submit']")
    nextButton.click()
    sleep(4)
    # Dismiss the "Turn on Notifications" dialog shown after login.
    notification = browser.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]")
    notification.click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    # likeAndComm() recurses internally, so this call normally never returns.
    likeAndComm()
    sleep(5)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Open the Instagram login page and give it a moment to render.
browser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
sleep(2)
def likeAndComm():
    """Like and comment on a 3x3 grid of explore-page posts, then reload
    the explore page and recurse.

    NOTE(review): the recursion at the end is unconditional, so this
    never terminates and will eventually exhaust the Python recursion
    limit. The find_element_by_* API is Selenium 3 style; Selenium 4
    removed it — TODO confirm the pinned selenium version.
    """
    global posts  # running count of posts processed across recursive calls
    for y in range(1, 4):
        for x in range(1, 4):
            # Open the post at grid position (row y, column x) via its
            # absolute XPath on the explore page.
            post = browser.find_element_by_xpath(
                '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(
                y) + ']/div[' + str(x) + ']')
            browser.implicitly_wait(1)
            post.click()
            sleep(2)
            # Click the like button of the opened post. The assigned value
            # is the result of .click(), not the element itself.
            postLike = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'
                ).click()
            print('Post liked')
            sleep(2)
            print('click1')
            sleep(3)
            # Focus the comment form, then type a randomly chosen comment.
            comment = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'
                ).click()
            print('click2')
            comment = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'
                ).send_keys(random.choice(comments))
            print('send1-Writing comment')
            sleep(3)
            # Submit the comment.
            sendComment = browser.find_element_by_xpath(
                "//button[@type='submit']")
            sendComment.click()
            print('click3-Comment-posted')
            print('searching for new post, searching...')
            sleep(4)
            posts += 1
            # Close the post overlay before moving to the next grid cell.
            closePost = browser.find_element_by_xpath(
                '/html/body/div[4]/div[3]/button/div')
            closePost.click()
            sleep(3)
            print('No. of posts: ' + str(posts))
    sleep(5)
    # Reload the explore page and process the next batch of posts.
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm()
def start():
    """Log in to Instagram, dismiss the notifications prompt, open the
    explore page and hand control to likeAndComm().

    Uses the module-level ``browser`` webdriver.
    """
    # NOTE(review): credentials are hard-coded placeholders — replace with
    # real values (ideally loaded from the environment) before running.
    username = browser.find_element_by_name('username')
    username.send_keys('Username')
    password = browser.find_element_by_name('password')
    password.send_keys('Password')
    # Submit the login form.
    nextButton = browser.find_element_by_xpath("//button[@type='submit']")
    nextButton.click()
    sleep(4)
    # Dismiss the "Turn on Notifications" dialog shown after login.
    notification = browser.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]")
    notification.click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    # likeAndComm() recurses internally, so this call normally never returns.
    likeAndComm()
    sleep(5)
# Entry point: run the bot immediately when the module is executed.
start()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Pool of canned comments; likeAndComm() picks one at random per post.
comments = ['Please Visite on my page take a look if you like please follow ',
    'Nice post- just follow me @eyetunities ',
    'loool very nice!-want to earn money just follow me @eyetunities ',
    'I like it!-follow me for daily motivational post on your wall',
    'Super ;)-follow me guys @eyetunities ',
    'hmmm,interesting-follow me for daily money earning tips ',
    ' wow- follow me for online money earning tips ',
    'amazing post dude-also check out my profile , for Online money earning tips '
    , 'learn something new - follow me @eyetunities ',
    'Mind blowing - follow for money earning tips Online money ',
    'I like it , great post- follow my page please -daily money earning tips ']
# Global counter of posts processed so far (incremented in likeAndComm).
posts = 0
# NOTE(review): hard-coded Windows driver path — TODO make configurable.
browser = webdriver.Chrome(executable_path=
    'D:\\pythonlearn\\python_projects\\chromedriver.exe')
# Open the Instagram login page and give it a moment to render.
browser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
sleep(2)
def likeAndComm():
    """Like and comment on a 3x3 grid of explore-page posts, then reload
    the explore page and recurse.

    NOTE(review): the recursion at the end is unconditional, so this
    never terminates and will eventually exhaust the Python recursion
    limit. The find_element_by_* API is Selenium 3 style; Selenium 4
    removed it — TODO confirm the pinned selenium version.
    """
    global posts  # running count of posts processed across recursive calls
    for y in range(1, 4):
        for x in range(1, 4):
            # Open the post at grid position (row y, column x) via its
            # absolute XPath on the explore page.
            post = browser.find_element_by_xpath(
                '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(
                y) + ']/div[' + str(x) + ']')
            browser.implicitly_wait(1)
            post.click()
            sleep(2)
            # Click the like button of the opened post. The assigned value
            # is the result of .click(), not the element itself.
            postLike = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'
                ).click()
            print('Post liked')
            sleep(2)
            print('click1')
            sleep(3)
            # Focus the comment form, then type a randomly chosen comment.
            comment = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'
                ).click()
            print('click2')
            comment = browser.find_element_by_xpath(
                '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'
                ).send_keys(random.choice(comments))
            print('send1-Writing comment')
            sleep(3)
            # Submit the comment.
            sendComment = browser.find_element_by_xpath(
                "//button[@type='submit']")
            sendComment.click()
            print('click3-Comment-posted')
            print('searching for new post, searching...')
            sleep(4)
            posts += 1
            # Close the post overlay before moving to the next grid cell.
            closePost = browser.find_element_by_xpath(
                '/html/body/div[4]/div[3]/button/div')
            closePost.click()
            sleep(3)
            print('No. of posts: ' + str(posts))
    sleep(5)
    # Reload the explore page and process the next batch of posts.
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm()
def start():
    """Log in to Instagram with the placeholder credentials, dismiss the
    notification prompt, open the explore page and run likeAndComm().

    NOTE(review): 'Username'/'Password' are literal placeholders -- they
    must be replaced with real credentials for the login to succeed.
    """
    username = browser.find_element_by_name('username')
    username.send_keys('Username')
    password = browser.find_element_by_name('password')
    password.send_keys('Password')
    nextButton = browser.find_element_by_xpath("//button[@type='submit']")
    nextButton.click()
    sleep(4)
    # Dismiss the "Turn on Notifications" dialog shown after login.
    notification = browser.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]")
    notification.click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm()
    sleep(5)
# Entry point: start the bot.
start()
<|reserved_special_token_1|>
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
import random
import string
from time import sleep
from selenium import webdriver
# Pool of canned comments; one is chosen at random for each post.
comments = ['Please Visite on my page take a look if you like please follow ',
    'Nice post- just follow me @eyetunities ',
    'loool very nice!-want to earn money just follow me @eyetunities ',
    'I like it!-follow me for daily motivational post on your wall',
    'Super ;)-follow me guys @eyetunities ',
    'hmmm,interesting-follow me for daily money earning tips ',
    ' wow- follow me for online money earning tips ',
    'amazing post dude-also check out my profile , for Online money earning tips '
    , 'learn something new - follow me @eyetunities ',
    'Mind blowing - follow for money earning tips Online money ',
    'I like it , great post- follow my page please -daily money earning tips ']
# Running count of posts liked/commented so far (updated by likeAndComm).
posts = 0
# Selenium Chrome driver; the chromedriver path is machine-specific.
browser = webdriver.Chrome(executable_path=
    'D:\\pythonlearn\\python_projects\\chromedriver.exe')
browser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
sleep(2)
def likeAndComm():
    """Like and comment on the 3x3 grid of posts on the explore page,
    then reload the explore page and repeat indefinitely.

    Uses module-level globals: ``browser`` (selenium WebDriver),
    ``comments`` (pool of canned comment strings) and ``posts``
    (running counter).
    NOTE(review): the absolute XPaths assume a specific Instagram DOM
    layout and are fragile -- confirm against the current markup.
    """
    global posts
    # The original implementation recursed into itself forever, which would
    # eventually exhaust the Python stack; a loop keeps the same observable
    # behaviour (an endless like/comment cycle) without unbounded recursion.
    while True:
        for y in range(1, 4):
            for x in range(1, 4):
                # Open the (y, x) cell of the explore grid.
                post = browser.find_element_by_xpath(
                    '/html/body/div[1]/section/main/div/div[1]/div/div[' +
                    str(y) + ']/div[' + str(x) + ']')
                browser.implicitly_wait(1)
                post.click()
                sleep(2)
                # Like the opened post.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'
                ).click()
                print('Post liked')
                sleep(2)
                print('click1')
                sleep(3)
                # Focus the comment form, then type a random canned comment.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'
                ).click()
                print('click2')
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'
                ).send_keys(random.choice(comments))
                print('send1-Writing comment')
                sleep(3)
                browser.find_element_by_xpath(
                    "//button[@type='submit']").click()
                print('click3-Comment-posted')
                print('searching for new post, searching...')
                sleep(4)
                posts += 1
                # Close the post dialog before moving to the next cell.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[3]/button/div').click()
                sleep(3)
            print('No. of posts: ' + str(posts))
        sleep(5)
        browser.get('https://www.instagram.com/explore/')
        sleep(6)
def start():
    """Log in to Instagram with the placeholder credentials, dismiss the
    notification prompt, open the explore page and run likeAndComm().

    NOTE(review): 'Username'/'Password' are literal placeholders -- they
    must be replaced with real credentials for the login to succeed.
    """
    username = browser.find_element_by_name('username')
    username.send_keys('Username')
    password = browser.find_element_by_name('password')
    password.send_keys('Password')
    nextButton = browser.find_element_by_xpath("//button[@type='submit']")
    nextButton.click()
    sleep(4)
    # Dismiss the "Turn on Notifications" dialog shown after login.
    notification = browser.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]")
    notification.click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm()
    sleep(5)
# Entry point: start the bot.
start()
<|reserved_special_token_1|>
#----------------------------
# |
# Instagram Bot- Devesh Kr. Verma
# instagram- @felon_tpf
# |
#----------------------------
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
import random
import string
from time import sleep
from selenium import webdriver
# Change this list to your wanted comments (what you want to comment on posts);
# one entry is chosen at random for each post.
comments = ['Please Visite on my page take a look if you like please follow ', 'Nice post- just follow me @eyetunities ', 'loool very nice!-want to earn money just follow me @eyetunities ', 'I like it!-follow me for daily motivational post on your wall', 'Super ;)-follow me guys @eyetunities ', 'hmmm,interesting-follow me for daily money earning tips ', ' wow- follow me for online money earning tips ', 'amazing post dude-also check out my profile , for Online money earning tips ', 'learn something new - follow me @eyetunities ', 'Mind blowing - follow for money earning tips Online money ', 'I like it , great post- follow my page please -daily money earning tips ', ]
# This variable keeps track of the number of posts processed
posts=0
# Chromedriver path. Make sure to have the same Chromedriver version as your Google Chrome browser
browser = webdriver.Chrome(executable_path= r"D:\pythonlearn\python_projects\chromedriver.exe") # <----- ENTER PATH HERE 
browser.get(('https://www.instagram.com/accounts/login/?source=auth_switcher'))
sleep(2)
def likeAndComm():
    """Like and comment on the 3x3 grid of posts on the explore page,
    then reload the explore page and repeat indefinitely.

    Uses module-level globals: ``browser`` (selenium WebDriver),
    ``comments`` (pool of canned comment strings) and ``posts``
    (running counter).
    NOTE(review): the absolute XPaths assume a specific Instagram DOM
    layout and are fragile -- confirm against the current markup.
    """
    global posts
    # The original implementation recursed into itself forever, which would
    # eventually exhaust the Python stack; a loop keeps the same observable
    # behaviour (an endless like/comment cycle) without unbounded recursion.
    while True:
        for y in range(1, 4):
            for x in range(1, 4):
                # Open the (y, x) cell of the explore grid.
                post = browser.find_element_by_xpath(
                    '/html/body/div[1]/section/main/div/div[1]/div/div[' +
                    str(y) + ']/div[' + str(x) + ']')
                browser.implicitly_wait(1)
                post.click()
                sleep(2)
                # Like the opened post.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'
                ).click()
                print("Post liked")
                sleep(2)
                print("click1")
                sleep(3)
                # Focus the comment form, then type a random canned comment.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'
                ).click()
                print("click2")
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'
                ).send_keys(random.choice(comments))
                print("send1-Writing comment")
                sleep(3)
                browser.find_element_by_xpath(
                    "//button[@type='submit']").click()
                print("click3-Comment-posted")
                print("searching for new post, searching...")
                sleep(4)
                posts += 1
                # Close the post dialog before moving to the next cell.
                browser.find_element_by_xpath(
                    '/html/body/div[4]/div[3]/button/div').click()
                sleep(3)
            print('No. of posts: ' + str(posts))
        sleep(5)
        browser.get('https://www.instagram.com/explore/')
        sleep(6)
def start():
    """Log in to Instagram with the placeholder credentials, dismiss the
    notification prompt, open the explore page and run likeAndComm().

    NOTE(review): 'Username'/'Password' are literal placeholders -- they
    must be replaced with real credentials for the login to succeed.
    """
    browser.find_element_by_name('username').send_keys('Username')
    browser.find_element_by_name('password').send_keys('Password')
    browser.find_element_by_xpath("//button[@type='submit']").click()
    sleep(4)
    # Dismiss the "Turn on Notifications" dialog shown after login.
    browser.find_element_by_xpath(
        "//button[contains(text(), 'Not Now')]").click()
    browser.get('https://www.instagram.com/explore/')
    sleep(6)
    likeAndComm()
    sleep(5)


# Start the program
start()
|
flexible
|
{
"blob_id": "6d18aa585c656b244d1e4272caa8419c04b20b6c",
"index": 2363,
"step-1": "<mask token>\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\n<mask token>\n",
"step-2": "<mask token>\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n '/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n",
"step-3": "<mask token>\ncomments = ['Please Visite on my page take a look if you like please follow ',\n 'Nice post- just follow me @eyetunities ',\n 'loool very nice!-want to earn money just follow me @eyetunities ',\n 'I like it!-follow me for daily motivational post on your wall',\n 'Super ;)-follow me guys @eyetunities ',\n 'hmmm,interesting-follow me for daily money earning tips ',\n ' wow- follow me for online money earning tips ',\n 'amazing post dude-also check out my profile , for Online money earning tips '\n , 'learn something new - follow me @eyetunities ',\n 'Mind blowing - follow for money earning tips Online money ',\n 'I like it , great post- follow my page please -daily money earning tips ']\nposts = 0\nbrowser = webdriver.Chrome(executable_path=\n 'D:\\\\pythonlearn\\\\python_projects\\\\chromedriver.exe')\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n 
'/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n",
"step-4": "from selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nimport random\nimport string\nfrom time import sleep\nfrom selenium import webdriver\ncomments = ['Please Visite on my page take a look if you like please follow ',\n 'Nice post- just follow me @eyetunities ',\n 'loool very nice!-want to earn money just follow me @eyetunities ',\n 'I like it!-follow me for daily motivational post on your wall',\n 'Super ;)-follow me guys @eyetunities ',\n 'hmmm,interesting-follow me for daily money earning tips ',\n ' wow- follow me for online money earning tips ',\n 'amazing post dude-also check out my profile , for Online money earning tips '\n , 'learn something new - follow me @eyetunities ',\n 'Mind blowing - follow for money earning tips Online money ',\n 'I like it , great post- follow my page please -daily money earning tips ']\nposts = 0\nbrowser = webdriver.Chrome(executable_path=\n 'D:\\\\pythonlearn\\\\python_projects\\\\chromedriver.exe')\nbrowser.get('https://www.instagram.com/accounts/login/?source=auth_switcher')\nsleep(2)\n\n\ndef likeAndComm():\n global posts\n for y in range(1, 4):\n for x in range(1, 4):\n post = browser.find_element_by_xpath(\n '/html/body/div[1]/section/main/div/div[1]/div/div[' + str(\n y) + ']/div[' + str(x) + ']')\n browser.implicitly_wait(1)\n post.click()\n sleep(2)\n postLike = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]'\n ).click()\n print('Post liked')\n sleep(2)\n print('click1')\n sleep(3)\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form'\n ).click()\n print('click2')\n comment = browser.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea'\n ).send_keys(random.choice(comments))\n print('send1-Writing comment')\n sleep(3)\n sendComment = browser.find_element_by_xpath(\n \"//button[@type='submit']\")\n 
sendComment.click()\n print('click3-Comment-posted')\n print('searching for new post, searching...')\n sleep(4)\n posts += 1\n closePost = browser.find_element_by_xpath(\n '/html/body/div[4]/div[3]/button/div')\n closePost.click()\n sleep(3)\n print('No. of posts: ' + str(posts))\n sleep(5)\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n\n\ndef start():\n username = browser.find_element_by_name('username')\n username.send_keys('Username')\n password = browser.find_element_by_name('password')\n password.send_keys('Password')\n nextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n nextButton.click()\n sleep(4)\n notification = browser.find_element_by_xpath(\n \"//button[contains(text(), 'Not Now')]\")\n notification.click()\n browser.get('https://www.instagram.com/explore/')\n sleep(6)\n likeAndComm()\n sleep(5)\n\n\nstart()\n",
"step-5": "#----------------------------\n#\t\t\t\t\t\t |\n# Instagram Bot- Devesh Kr. Verma \n# instagram- @felon_tpf\t\n#\t\t\t\t\t\t\t|\n#----------------------------\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom selenium.webdriver.common.keys import Keys\nimport random\nimport string\nfrom time import sleep\nfrom selenium import webdriver\n#Change this list to your wanted comments (what you wnat to comment on posts)\ncomments = ['Please Visite on my page take a look if you like please follow ', 'Nice post- just follow me @eyetunities ', 'loool very nice!-want to earn money just follow me @eyetunities ', 'I like it!-follow me for daily motivational post on your wall', 'Super ;)-follow me guys @eyetunities ', 'hmmm,interesting-follow me for daily money earning tips ', ' wow- follow me for online money earning tips ', 'amazing post dude-also check out my profile , for Online money earning tips ', 'learn something new - follow me @eyetunities ', 'Mind blowing - follow for money earning tips Online money ', 'I like it , great post- follow my page please -daily money earning tips ', ]\n\n#This variables to keep tracking of the posts \nposts=0\n\n#Chromedriver path. 
Make sure to have the same Chromedriver version as your Google Chrome browser\nbrowser = webdriver.Chrome(executable_path= r\"D:\\pythonlearn\\python_projects\\chromedriver.exe\") # <----- ENTER PATH HERE \n\nbrowser.get(('https://www.instagram.com/accounts/login/?source=auth_switcher'))\nsleep(2) \n\t\n\ndef likeAndComm(): # Likes and Comments the first 9 posts\n\tglobal posts\n\tfor y in range (1,4):\n\t\tfor x in range(1,4):\n\t\t\tpost = browser.find_element_by_xpath('/html/body/div[1]/section/main/div/div[1]/div/div['+str(y)+']/div['+str(x)+']') \n\t\t\tbrowser.implicitly_wait(1) \n\t\t\tpost.click()\n\t\t\tsleep(2)\n\t\t\tpostLike = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]').click()\n\t\t\t#postLike.click()\n\t\t\tprint(\"Post liked\") \n\t\t\tsleep(2)\n\t\t\t#comment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click() \n\t\t\tprint(\"click1\")\n\t\t\tsleep(3)\n\t\t\tcomment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form').click() \n\t\t\tprint(\"click2\")\n\t\t\tcomment = browser.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[3]/div/form/textarea').send_keys(random.choice(comments))\t\n\t\t\tprint(\"send1-Writing comment\")\n\t\t\tsleep(3)\n\t\t\tsendComment = browser.find_element_by_xpath(\"//button[@type='submit']\") \n\t\t\tsendComment.click()\n\t\t\tprint(\"click3-Comment-posted\")\n\t\t\tprint(\"searching for new post, searching...\")\n\t\t\tsleep(4)\n\t\t\tposts+=1\n\t\t\tclosePost=browser.find_element_by_xpath('/html/body/div[4]/div[3]/button/div')\n\t\t\tclosePost.click()\n\t\t\tsleep(3)\n\t\tprint ('No. 
of posts: ' +str(posts))\n\t\n\tsleep(5)\n\tbrowser.get('https://www.instagram.com/explore/')\n\tsleep(6)\n\tlikeAndComm()\n\t\n\t\t\ndef start():\n\t\n\tusername = browser.find_element_by_name('username')\n\tusername.send_keys('Username') # <- INSERT YOUR INSTAGRAM USERNAME HERE\n\tpassword = browser.find_element_by_name('password')\n\tpassword.send_keys('Password') # <- INSERT YOUR INSTAGRAM PASSWORD HERE\n\tnextButton = browser.find_element_by_xpath(\"//button[@type='submit']\")\n\tnextButton.click()\n\tsleep(4)\n\tnotification = browser.find_element_by_xpath(\"//button[contains(text(), 'Not Now')]\")\n\tnotification.click()\n\tbrowser.get('https://www.instagram.com/explore/')\n\tsleep(6)\n\tlikeAndComm() # likeAndComm function \n\tsleep(5)\n\t\n\t\n#Start the programm\nstart()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import argparse
import pandas as pd
import random
import time
class Deck:
    """A collection of flashcards loaded from a spreadsheet.

    ``front`` and ``back`` are the column headers naming the two sides
    (languages); ``num_cols`` is how many front/back column pairs the
    sheet holds. Cards are appended to ``flashcards`` by the loader.
    """

    def __init__(self, num_cols, front, back):
        self.num_cols = num_cols
        self.front = front
        self.back = back
        self.flashcards = []
class Flashcard:
    """A single front/back vocabulary card belonging to a Deck."""

    def __init__(self, deck, front, back, column, row):
        self.deck = deck
        self.front = front
        self.back = back
        self.column = column
        self.row = row
        self.correct = False  # set to True once answered correctly

    def show_front(self):
        """Return the front side labelled with its language name."""
        return "{}: {}".format(self.deck.front, self.front)

    def show_back(self):
        """Return the back side labelled with its language name."""
        return "{}: {}".format(self.deck.back, self.back)

    def show_card(self):
        """Return both sides, front side first."""
        return "{}: {}, {}: {}".format(
            self.deck.front, self.front, self.deck.back, self.back)

    def show_reverse(self):
        """Return both sides, back side first."""
        return "{}: {}, {}: {}".format(
            self.deck.back, self.back, self.deck.front, self.front)
def create_deck(filename, num_cols):
    """Build a Deck from an Excel workbook.

    The first two column headers name the front and back languages.
    Repeated header names are disambiguated by pandas as "name.1",
    "name.2", ...; this loader expects that scheme for columns 1..n-1.
    Rows where either cell is blank are skipped.
    """
    df = pd.read_excel(filename)
    front = df.columns.values[0]
    back = df.columns.values[1]
    deck = Deck(num_cols, front, back)
    for col in range(num_cols):
        # pandas suffixes duplicate headers with ".<n>" for n >= 1.
        fcol = front if col == 0 else "{}.{}".format(front, col)
        bcol = back if col == 0 else "{}.{}".format(back, col)
        for row in range(df[fcol].size):
            f = df[fcol][row]
            b = df[bcol][row]
            if pd.isnull(f) or pd.isnull(b):
                continue
            deck.flashcards.append(
                Flashcard(deck, f.strip(), b.strip(), col, row))
    return deck
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
    """Select a slice of cards from ``deck``.

    Keeps only cards whose ``column`` equals ``first_letter`` (-1 keeps
    every column), then returns ``number_of_cards`` of them starting at
    ``start_index``.
    """
    matching = [
        card for card in deck.flashcards
        if first_letter == card.column or first_letter == -1
    ]
    return matching[start_index:start_index + number_of_cards]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
    """Run a quiz session over the selected slice of ``deck``."""
    selection = get_cards_from_deck(
        deck, first_letter, start_index, number_of_cards)
    play_cards(mode, deck, selection)
def play_cards(mode, deck, cards):
    """Quiz the user on ``cards`` and report results.

    Even modes quiz front->back, odd modes back->front; modes >= 2 also
    shuffle ``cards`` in place (NOTE: the caller's list is mutated).
    After the run the user may recursively review the cards answered
    incorrectly; total/average timing is printed only once a round ends
    with no mistakes or the user declines a review round.
    """
    source = deck.front if mode % 2 == 0 else deck.back
    target = deck.back if mode % 2 == 0 else deck.front
    if mode >= 2:
        random.shuffle(cards)
    num_cards = len(cards)
    start_time = time.time()
    for i, fc in enumerate(cards):
        source_word = fc.front if mode % 2 == 0 else fc.back
        target_word = fc.back if mode % 2 == 0 else fc.front
        quiz(fc, source, source_word, target, target_word, i, num_cards)
    print("All Done!")
    # Idiomatic truthiness count (was: sum(fc.correct == True ...)).
    correct = sum(1 for fc in cards if fc.correct)
    incorrect = len(cards) - correct
    print("Correct: {}".format(correct))
    print("Incorrect: {}".format(incorrect))
    if incorrect:
        incorrect_cards = [fc for fc in cards if not fc.correct]
        print("\n".join(fc.show_card() for fc in incorrect_cards))
        again = input("review incorrect words (y/n): ")
        if again in ('y', '1', 'да'):
            play_cards(mode, deck, incorrect_cards)
    else:
        finish_time = time.time()
        time_diff = time.gmtime(finish_time - start_time)
        # Guard against ZeroDivisionError when an empty selection was passed
        # (zero cards still reaches this branch since incorrect == 0).
        avg_time = time.gmtime((finish_time - start_time) / max(num_cards, 1))
        print("Total Time: {}".format(time.strftime("%H:%M:%S", time_diff)))
        print("Time per card: {}".format(time.strftime("%H:%M:%S", avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i,
         number_of_cards):
    """Ask the user to translate one card and record the result on ``fc``.

    Prints the card's position and the source word, reads the user's
    answer, and sets ``fc.correct`` when it matches after normalisation.
    """
    print("Card {}/{}".format(i + 1, number_of_cards))
    print("{} word: {}".format(source_language, source_word))
    answer = input("Enter {} translation: ".format(target_language))
    if is_correct(answer, target_word):
        fc.correct = True
        print("Correct!")
    else:
        print("Incorrect! Correct answer was: {}".format(target_word))
        # Make the user retype the right answer once; the response is
        # deliberately not checked, so the unused `n = ` binding was removed.
        input("Enter {} translation for {}: ".format(target_language,
                                                     source_word))
def is_correct(answer, target):
    """Return True when ``answer`` matches ``target`` after normalisation."""
    normalised_answer = format_for_comparison(answer)
    normalised_target = format_for_comparison(target)
    return normalised_answer == normalised_target
def format_for_comparison(word):
    """Normalise a vocab string so word order and case do not matter.

    Lowercases and trims, then sorts the comma-separated meanings that
    appear before the first '('; any parenthesised tail (e.g. declension
    notes) is reattached unchanged.
    """
    head, sep, tail = word.strip().lower().partition('(')
    # Sort the ", "-separated meanings so "b, a" compares equal to "a, b".
    meanings = ', '.join(sorted(head.split(', ')))
    return meanings + sep + tail
def learn_words(deck, first_letter, start_index, number_of_cards):
    """Interactive learning mode: show each card and let the user practise.

    For every selected card the full card, then each side alone, is shown
    and the user types a practice response (the input is not checked).
    Finishes with a read-only review of everything covered.
    """
    flashcards = get_cards_from_deck(deck, first_letter, start_index,
                                     number_of_cards)
    # Fix: display the number of cards actually selected -- the deck slice
    # can be shorter than the requested number_of_cards.
    total = len(flashcards)
    for i, card in enumerate(flashcards):
        print("Card {}/{}".format(i + 1, total))
        input("{}\nPractice: ".format(card.show_card()))
        input("{}\nPractice: ".format(card.show_front()))
        input("{}\nPractice: ".format(card.show_back()))
    print("Done! Review learned words:")
    for card in flashcards:
        print("{}".format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
    """Load the deck from ``filename`` and dispatch on ``mode``.

    Mode 4 is learning mode (learn_words); all other modes are quiz
    variants handled by play_game (even = front->back, odd =
    back->front, >= 2 shuffled).
    """
    num_cols = 9  # the spreadsheet holds 9 front/back column pairs
    deck = create_deck(filename, num_cols)
    print("Welcome to The Flashcard Learner!")
    # The old interactive mode menu was commented-out dead code and has
    # been removed; the mode now always comes from the CLI argument.
    print("Okay! Let's play!")
    if mode == 4:
        learn_words(deck, first_letter, start_index, number_of_cards)
    else:
        play_game(deck, mode, first_letter, start_index, number_of_cards)
if __name__ == "__main__":
    # Command-line entry point: positional args select the vocab file and
    # which slice of cards to practise, plus the quiz/learning mode.
    parser = argparse.ArgumentParser(description="Learn flashcards")
    # NOTE(review): `default` has no effect on a positional argument unless
    # nargs='?' is also given -- the filename is effectively required.
    parser.add_argument("filename", help="name of .xlsx file with vocab", default="RussianVocab.xlsx")
    parser.add_argument("category", type=int, help="e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)")
    parser.add_argument("start", type=int, help="start index (lists are 0-indexed)")
    parser.add_argument("num", type=int, help="number of cards you'd like to see")
    # mode: 0-3 quiz variants, 4 = learning mode (see main()).
    parser.add_argument("mode", type=int)
    args = parser.parse_args()
    main(args.filename, args.category, args.start, args.num, args.mode)
|
normal
|
{
"blob_id": "d5903698eb8ed6be531b0cc522d4feff6b79da4e",
"index": 954,
"step-1": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = 
time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = 
time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"step-4": "import argparse\nimport pandas as pd\nimport random\nimport time\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n 
random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"step-5": "import argparse\nimport pandas as pd\nimport random\nimport time\n\nclass Deck:\n\tdef __init__(self, num_cols, front, back):\n\t\tself.flashcards = []\n\t\tself.num_cols = num_cols\n\t\tself.front = front\n\t\tself.back = back\n\nclass Flashcard:\n\tdef __init__(self, deck, front, back, column, row):\n\t\tself.deck = deck\n\t\tself.front = front\n\t\tself.back = back\n\t\tself.column = column\n\t\tself.row = row\n\t\tself.correct = False\n\n\tdef show_front(self):\n\t\tr = \"{}: {}\".format(self.deck.front, self.front)\n\t\treturn r\n\n\tdef show_back(self):\n\t\treturn \"{}: {}\".format(self.deck.back, self.back)\n\n\tdef show_card(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.front, self.front, self.deck.back, self.back)\n\n\tdef show_reverse(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.back, self.back, self.deck.front, self.front)\n\n\ndef create_deck(filename, num_cols):\n\tdf = pd.read_excel(filename)\n\tfront = df.columns.values[0]\n\tback = df.columns.values[1]\n\n\tdeck = Deck(num_cols, front, back)\n\tfor i in range(num_cols):\n\t\tfront_column = \"{}.{}\".format(front, i) if i else front\n\t\tback_column = \"{}.{}\".format(back, i) if i else back\n\t\tfor row in range(df[front_column].size):\n\t\t\tf = df[front_column][row]\n\t\t\tb = df[back_column][row]\n\t\t\tif not (pd.isnull(f) or pd.isnull(b)):\t\n\t\t\t\tfc = Flashcard(deck, f.strip(), b.strip(), i, row)\n\t\t\t\tdeck.flashcards.append(fc)\n\t\n\treturn deck\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n\tflashcards = [fc for fc in deck.flashcards if fc.column == first_letter or first_letter == -1]\n\treturn flashcards[start_index:number_of_cards+start_index]\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tplay_cards(mode, deck, flashcards)\n\ndef play_cards(mode, deck, cards):\n\tsource = deck.front if mode%2 == 0 else 
deck.back\n\ttarget = deck.back if mode%2 == 0 else deck.front\n\n\tif mode >= 2:\n\t\trandom.shuffle(cards)\n\n\tnum_cards = len(cards)\n\tstart_time = time.time()\n\n\tfor i, fc in enumerate(cards):\n\t\tsource_word = fc.front if mode%2==0 else fc.back\n\t\ttarget_word = fc.back if mode%2==0 else fc.front\n\n\t\tquiz(fc, source, source_word, target, target_word, i, num_cards)\n\n\tprint(\"All Done!\")\n\tcorrect = sum(fc.correct == True for fc in cards)\n\tincorrect = len(cards) - correct\n\tprint(\"Correct: {}\".format(correct))\n\tprint(\"Incorrect: {}\".format(incorrect))\n\n\tif (incorrect):\n\t\tincorrect_cards = [fc for fc in cards if not fc.correct]\n\t\tprint(\"\\n\".join([fc.show_card() for fc in incorrect_cards]))\n\t\tagain = input(\"review incorrect words (y/n): \")\n\t\tif again == 'y' or again == '1' or again == 'да':\n\t\t\tplay_cards(mode, deck, incorrect_cards)\n\telse:\n\t\tfinish_time = time.time()\n\t\ttime_diff = time.gmtime(finish_time - start_time)\n\t\tavg_time = time.gmtime((finish_time - start_time) / num_cards)\n\t\tprint(\"Total Time: {}\".format(time.strftime(\"%H:%M:%S\", time_diff)))\n\t\tprint(\"Time per card: {}\".format(time.strftime(\"%H:%M:%S\", avg_time)))\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i, number_of_cards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tprint(\"{} word: {}\".format(source_language, source_word))\n\t\tanswer = input(\"Enter {} translation: \".format(target_language))\n\t\t\n\t\tif is_correct(answer, target_word):\n\t\t\tfc.correct = True\n\t\t\tprint(\"Correct!\")\n\t\t\n\t\telse:\n\t\t\tprint(\"Incorrect! 
Correct answer was: {}\".format(target_word))\n\t\t\tn = input(\"Enter {} translation for {}: \".format(target_language, source_word))\n\n\ndef is_correct(answer, target):\n\treturn format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n\t# strip whitespace and lowercase\n\tword = word.strip().lower()\n\n\t# pop off the declensions from the end\n\tword = word.split('(')\n\n\t# sort the list of meanings\n\tword[0] = word[0].split(', ')\n\tword[0].sort()\n\n\t# join the first part back together:\n\tword[0] = ', '.join(word[0])\n\n\t# now add the declensions back on\n\tword = '('.join(word)\n\t\n\treturn word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tfor i, card in enumerate(flashcards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tinput(\"{}\\nPractice: \".format(card.show_card()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_front()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_back()))\n\t\n\tprint(\"Done! Review learned words:\")\n\tfor card in flashcards:\n\t\tprint(\"{}\".format(card.show_card()))\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n\tnum_cols = 9\n\tdeck = create_deck(filename, num_cols)\n\tprint(\"Welcome to The Flashcard Learner!\")\n\t# print(\"Available Modes:\")\n\t# print(\"0: Quiz - Given a word in {}, provide {} translation\".format(deck.front.lower(), deck.back.lower()))\n\t# print(\"1: Quiz - Given a word in {}, provide {} translation\".format(deck.back.lower(), deck.front.lower()))\n\t# print(\"2: Mode 0 with cards given in random order\")\n\t# print(\"3: Mode 1 with cards given in random order\")\n\t# print(\"4: Learning - Shown {} and {} side by side, practice typing both\".format(deck.front.lower(), deck.back.lower()))\n\t# mode = int(input(\"Enter mode: \"))\n\t\n\tprint(\"Okay! 
Let's play!\")\n\tif mode == 4:\n\t\tlearn_words(deck, first_letter, start_index, number_of_cards)\n\telse:\n\t\tplay_game(deck, mode, first_letter, start_index, number_of_cards)\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Learn flashcards\")\n\tparser.add_argument(\"filename\", help=\"name of .xlsx file with vocab\", default=\"RussianVocab.xlsx\")\n\tparser.add_argument(\"category\", type=int, help=\"e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)\")\n\tparser.add_argument(\"start\", type=int, help=\"start index (lists are 0-indexed)\")\n\tparser.add_argument(\"num\", type=int, help=\"number of cards you'd like to see\")\n\tparser.add_argument(\"mode\", type=int)\n\targs = parser.parse_args()\n\tmain(args.filename, args.category, args.start, args.num, args.mode)\n\n",
"step-ids": [
8,
17,
18,
19,
20
]
}
|
[
8,
17,
18,
19,
20
] |
<|reserved_special_token_0|>
@Interface.staticderived
class Plugin(PluginBase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
@Interface.override
def Generate(open_file_func, global_custom_structs, global_custom_enums,
data, output_dir, status_stream):
result_code = 0
status_stream.write('Preprocessing data...')
with status_stream.DoneManager():
type_info_data = []
for items in data:
type_info_data.append([TypeInfoData(item,
global_custom_structs, global_custom_enums) for item in
items])
status_stream.write('Generating Common Files...')
with status_stream.DoneManager() as this_dm:
this_dm.result = _GenerateCommonFiles(open_file_func,
output_dir, this_dm.stream)
if this_dm.result != 0:
return this_dm.result
for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:
status_stream.write(desc)
with status_stream.DoneManager(suffix='\n') as dm:
for index, (items, items_type_info_data) in enumerate(zip(
data, type_info_data)):
dm.stream.write("Processing '{}' ({} of {})...".format(
items[0].name, index + 1, len(data)))
with dm.stream.DoneManager() as this_dm:
this_dm.result = func(open_file_func, output_dir,
items, items_type_info_data, this_dm.stream)
if dm.result < 0:
return dm.result
result_code = result_code or dm.result
return result_code
<|reserved_special_token_0|>
class TypeInfoData(object):
def __init__(self, item, global_custom_structs, global_custom_enums):
custom_enums = OrderedDict()
for custom_enum in itertools.chain(global_custom_enums, getattr(
item, 'custom_enums', [])):
if isinstance(custom_enum.underlying_type, six.string_types):
type_info = self._CreateTypeInfo(custom_enum.underlying_type)
assert type_info, custom_enum.underlying_type
custom_enum.underlying_type_info = type_info
custom_enums[custom_enum.name] = custom_enum
custom_structs = OrderedDict()
for custom_struct in itertools.chain(global_custom_structs, getattr
(item, 'custom_structs', [])):
members = OrderedDict()
for member in custom_struct.members:
type_info = self._CreateTypeInfo(member.type)
assert type_info, member.type
assert member.name not in members, member.name
members[member.name] = type_info
custom_structs[custom_struct.name] = members
configuration_param_type_infos = []
for configuration_param in getattr(item, 'configuration_params', []):
if configuration_param.type in custom_enums:
type_info = custom_enums[configuration_param.type
].underlying_type_info
configuration_param.is_enum = True
else:
type_info = self._CreateTypeInfo(configuration_param.type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert type_info, configuration_param.type
configuration_param_type_infos.append(type_info)
input_type_info = self._CreateTypeInfo(item.input_type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert input_type_info, item.input_type
output_type_info = self._CreateTypeInfo(item.output_type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert output_type_info, item.output_type
dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item
.output_type), custom_structs=custom_structs, custom_enums=
custom_enums)
self.CustomStructs = custom_structs
self.ConfigurationParamTypeInfos = configuration_param_type_infos
self.InputTypeInfo = input_type_info
self.OutputTypeInfo = output_type_info
self.DynamicOutputTypeInfo = dynamic_output_info
@classmethod
def EnumTypeInfoClasses(cls):
cls._InitTypeInfoClasses()
yield from cls._type_info_classes
_type_info_classes = None
@classmethod
def _InitTypeInfoClasses(cls):
if cls._type_info_classes is not None:
return
from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo
type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,
SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,
StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]
for compound_module in [ScalarTypeInfos, StructTypeInfos]:
for obj_name in dir(compound_module):
if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'
) or obj_name == 'TypeInfo':
continue
type_info_classes.append(getattr(compound_module, obj_name))
cls._type_info_classes = type_info_classes
@classmethod
def _CreateTypeInfo(cls, the_type, *args, **kwargs):
cls._InitTypeInfoClasses()
is_optional = False
if the_type.endswith('?'):
the_type = the_type[:-1]
is_optional = True
type_info_class = None
for this_type_info_class in cls._type_info_classes:
if isinstance(this_type_info_class.TypeName, six.string_types):
if this_type_info_class.TypeName == the_type:
type_info_class = this_type_info_class
break
elif hasattr(this_type_info_class.TypeName, 'match'):
if this_type_info_class.TypeName.match(the_type):
type_info_class = this_type_info_class
break
if type_info_class is None:
return None
return type_info_class(*args, member_type=the_type, is_optional=
is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Interface.staticderived
class Plugin(PluginBase):
Name = Interface.DerivedProperty('SharedLibraryTests')
Description = Interface.DerivedProperty(
'Generates code used when testing the Shared Library import/export layer'
)
@staticmethod
@Interface.override
def Generate(open_file_func, global_custom_structs, global_custom_enums,
data, output_dir, status_stream):
result_code = 0
status_stream.write('Preprocessing data...')
with status_stream.DoneManager():
type_info_data = []
for items in data:
type_info_data.append([TypeInfoData(item,
global_custom_structs, global_custom_enums) for item in
items])
status_stream.write('Generating Common Files...')
with status_stream.DoneManager() as this_dm:
this_dm.result = _GenerateCommonFiles(open_file_func,
output_dir, this_dm.stream)
if this_dm.result != 0:
return this_dm.result
for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:
status_stream.write(desc)
with status_stream.DoneManager(suffix='\n') as dm:
for index, (items, items_type_info_data) in enumerate(zip(
data, type_info_data)):
dm.stream.write("Processing '{}' ({} of {})...".format(
items[0].name, index + 1, len(data)))
with dm.stream.DoneManager() as this_dm:
this_dm.result = func(open_file_func, output_dir,
items, items_type_info_data, this_dm.stream)
if dm.result < 0:
return dm.result
result_code = result_code or dm.result
return result_code
<|reserved_special_token_0|>
class TypeInfoData(object):
def __init__(self, item, global_custom_structs, global_custom_enums):
custom_enums = OrderedDict()
for custom_enum in itertools.chain(global_custom_enums, getattr(
item, 'custom_enums', [])):
if isinstance(custom_enum.underlying_type, six.string_types):
type_info = self._CreateTypeInfo(custom_enum.underlying_type)
assert type_info, custom_enum.underlying_type
custom_enum.underlying_type_info = type_info
custom_enums[custom_enum.name] = custom_enum
custom_structs = OrderedDict()
for custom_struct in itertools.chain(global_custom_structs, getattr
(item, 'custom_structs', [])):
members = OrderedDict()
for member in custom_struct.members:
type_info = self._CreateTypeInfo(member.type)
assert type_info, member.type
assert member.name not in members, member.name
members[member.name] = type_info
custom_structs[custom_struct.name] = members
configuration_param_type_infos = []
for configuration_param in getattr(item, 'configuration_params', []):
if configuration_param.type in custom_enums:
type_info = custom_enums[configuration_param.type
].underlying_type_info
configuration_param.is_enum = True
else:
type_info = self._CreateTypeInfo(configuration_param.type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert type_info, configuration_param.type
configuration_param_type_infos.append(type_info)
input_type_info = self._CreateTypeInfo(item.input_type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert input_type_info, item.input_type
output_type_info = self._CreateTypeInfo(item.output_type,
custom_structs=custom_structs, custom_enums=custom_enums)
assert output_type_info, item.output_type
dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item
.output_type), custom_structs=custom_structs, custom_enums=
custom_enums)
self.CustomStructs = custom_structs
self.ConfigurationParamTypeInfos = configuration_param_type_infos
self.InputTypeInfo = input_type_info
self.OutputTypeInfo = output_type_info
self.DynamicOutputTypeInfo = dynamic_output_info
@classmethod
def EnumTypeInfoClasses(cls):
cls._InitTypeInfoClasses()
yield from cls._type_info_classes
_type_info_classes = None
@classmethod
def _InitTypeInfoClasses(cls):
if cls._type_info_classes is not None:
return
from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo
type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,
SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,
StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]
for compound_module in [ScalarTypeInfos, StructTypeInfos]:
for obj_name in dir(compound_module):
if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'
) or obj_name == 'TypeInfo':
continue
type_info_classes.append(getattr(compound_module, obj_name))
cls._type_info_classes = type_info_classes
@classmethod
def _CreateTypeInfo(cls, the_type, *args, **kwargs):
cls._InitTypeInfoClasses()
is_optional = False
if the_type.endswith('?'):
the_type = the_type[:-1]
is_optional = True
type_info_class = None
for this_type_info_class in cls._type_info_classes:
if isinstance(this_type_info_class.TypeName, six.string_types):
if this_type_info_class.TypeName == the_type:
type_info_class = this_type_info_class
break
elif hasattr(this_type_info_class.TypeName, 'match'):
if this_type_info_class.TypeName.match(the_type):
type_info_class = this_type_info_class
break
if type_info_class is None:
return None
return type_info_class(*args, member_type=the_type, is_optional=
is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Interface.staticderived
class Plugin(PluginBase):
Name = Interface.DerivedProperty('SharedLibraryTests')
Description = Interface.DerivedProperty(
'Generates code used when testing the Shared Library import/export layer'
)
@staticmethod
@Interface.override
def Generate(open_file_func, global_custom_structs, global_custom_enums,
data, output_dir, status_stream):
result_code = 0
status_stream.write('Preprocessing data...')
with status_stream.DoneManager():
type_info_data = []
for items in data:
type_info_data.append([TypeInfoData(item,
global_custom_structs, global_custom_enums) for item in
items])
status_stream.write('Generating Common Files...')
with status_stream.DoneManager() as this_dm:
this_dm.result = _GenerateCommonFiles(open_file_func,
output_dir, this_dm.stream)
if this_dm.result != 0:
return this_dm.result
for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:
status_stream.write(desc)
with status_stream.DoneManager(suffix='\n') as dm:
for index, (items, items_type_info_data) in enumerate(zip(
data, type_info_data)):
dm.stream.write("Processing '{}' ({} of {})...".format(
items[0].name, index + 1, len(data)))
with dm.stream.DoneManager() as this_dm:
this_dm.result = func(open_file_func, output_dir,
items, items_type_info_data, this_dm.stream)
if dm.result < 0:
return dm.result
result_code = result_code or dm.result
return result_code
def _GenerateHeaderFile(open_file_func, output_dir, items,
all_type_info_data, output_stream):
with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.
format(items[0].name)), 'w') as f:
f.write(textwrap.dedent(
""" /* ---------------------------------------------------------------------- */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* Licensed under the MIT License */
/* ---------------------------------------------------------------------- */
#pragma once
#include "SharedLibrary_{name}.h"
#include "Traits.h"
#include "Featurizers/Structs.h"
#include "SharedLibraryTests_Common.hpp"
#if (defined _MSC_VER)
# pragma warning(push)
// I don't know why MSVC thinks that there is unreachable
// code in these methods during release builds.
# pragma warning(disable: 4702) // Unreachable code
# pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
# pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
#endif
"""
).format(name=items[0].name))
for item, type_info_data in zip(items, all_type_info_data):
template = getattr(item, 'template', None)
if template:
suffix = '_{}_'.format(template)
type_desc = ' <{}>'.format(template)
cpp_template_suffix = '<{}>'.format(type_info_data.
InputTypeInfo.CppType)
else:
suffix = '_'
type_desc = ''
cpp_template_suffix = ''
if type_info_data.ConfigurationParamTypeInfos:
constructor_template_params = ', typename... ConstructorArgTs'
constructor_params = (
',\n ConstructorArgTs &&... constructor_args')
constructor_args = (
'std::forward<ConstructorArgTs>(constructor_args)..., ')
else:
constructor_template_params = ''
constructor_params = ''
constructor_args = ''
fit_prefix_statements = ''
transform_input_args = (type_info_data.InputTypeInfo.
GetTransformInputArgs())
if isinstance(transform_input_args, tuple):
transform_input_args, fit_prefix_statements = (
transform_input_args)
if type_info_data.InputTypeInfo.TypeName == 'bool':
for_loop = 'for(bool input : inference_input)'
else:
for_loop = 'for(auto const & input : inference_input)'
if type_info_data.OutputTypeInfo.TypeName == 'bool':
invocation_template = 'results.push_back({});'
else:
invocation_template = 'results.emplace_back({});'
if item.has_dynamic_output:
output_statement_info = (type_info_data.
DynamicOutputTypeInfo.GetOutputInfo(invocation_template
=invocation_template, result_name='results'))
else:
output_statement_info = (type_info_data.OutputTypeInfo.
GetOutputInfo(invocation_template=invocation_template,
result_name='results'))
f.write(textwrap.dedent(
""" /* ---------------------------------------------------------------------- */
/* | {name}{type_desc} */
template <typename VectorInputT{constructor_template_params}>
void {name}{suffix}Test(
std::vector<VectorInputT> const &training_input,
std::vector<VectorInputT> const &inference_input,
std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}
) {{
ErrorInfoHandle * pErrorInfo(nullptr);
// Create the estimator
{name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);
REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));
REQUIRE(pEstimatorHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Train
if(training_input.empty() == false) {{
typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());
while(true) {{
TrainingState trainingState(0);
REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(trainingState != Training)
break;
FitResult result(0);
auto const & input(*iter);
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(result == ResetAndContinue) {{
iter = training_input.begin();
continue;
}}
++iter;
if(iter == training_input.end()) {{
REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
iter = training_input.begin();
}}
}}
}}
{name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);
REQUIRE(pErrorInfo == nullptr);
// Once here, training should be complete
{{
bool is_complete(false);
REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
REQUIRE(is_complete);
}}
// Create the Transformer
{name}{suffix}TransformerHandle * pTransformerHandle(nullptr);
REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));
REQUIRE(pTransformerHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Destroy the estimator
REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
"""
).format(name=item.name, type_desc=type_desc, suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
constructor_template_params=constructor_template_params,
constructor_params=constructor_params, constructor_args=
constructor_args, fit_input_args=transform_input_args,
fit_prefix_statements='' if not fit_prefix_statements else
"""{}
""".format(StringHelpers.LeftJustify(
fit_prefix_statements.rstrip(), 12))))
inline_destroy_statement = '// No inline destroy statement'
trailing_destroy_statement = '// No trailing destroy statement'
if output_statement_info.DestroyArgs:
if output_statement_info.DestroyInline:
inline_destroy_statement = textwrap.dedent(
"""
// Destroy the contents
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
"""
).format(name=item.name, suffix=suffix, args=
output_statement_info.DestroyArgs)
else:
trailing_destroy_statement = textwrap.dedent(
""" for(auto & {var_name}: results) {{
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
"""
).format(name=item.name, suffix=suffix, args=
output_statement_info.DestroyArgs, var_name=
output_statement_info.DestroyVarName or 'result')
if item.has_dynamic_output:
f.write(StringHelpers.LeftJustify(textwrap.dedent(
""" // Inference
std::vector<{vector_result_type}> results;
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
if(true) {{
{transform_vars}
REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
"""
).format(name=item.name, suffix=suffix,
vector_result_type=output_statement_info.
VectorResultType, for_loop=for_loop,
transform_prefix_statements='' if not
fit_prefix_statements else '{}\n\n '.format(
StringHelpers.LeftJustify(fit_prefix_statements, 4).
rstrip()), transform_vars=StringHelpers.LeftJustify(
'\n'.join(['{} {};'.format(var.Type, var.Name) for var in
output_statement_info.TransformVars]), 4),
transform_input_args=transform_input_args,
transform_output_args=', '.join(['&{}'.format(p.Name) for
p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(), 4
), inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(), 4)), 4,
skip_first_line=False))
else:
f.write(StringHelpers.LeftJustify(textwrap.dedent(
""" // Inference
std::vector<{vector_result_type}> results;
results.reserve(inference_input.size());
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
"""
).format(name=item.name, suffix=suffix,
vector_result_type=output_statement_info.
VectorResultType, for_loop=for_loop,
transform_prefix_statements='' if not
fit_prefix_statements else '{}\n\n '.format(
StringHelpers.LeftJustify(fit_prefix_statements, 4).
rstrip()), transform_vars=StringHelpers.LeftJustify(
'\n'.join(['{} {};'.format(var.Type, var.Name) for var in
output_statement_info.TransformVars]), 4),
transform_input_args=transform_input_args,
transform_output_args=', '.join(['&{}'.format(p.Name) for
p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(), 4
), inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(), 4)), 4,
skip_first_line=False))
f.write(textwrap.dedent(
"""
REQUIRE(verify_func(results));
{trailing_destroy_statement}
// Destroy the transformer
REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
"""
).format(name=item.name, suffix=suffix,
trailing_destroy_statement=StringHelpers.LeftJustify(
trailing_destroy_statement.rstrip(), 4)))
f.write(textwrap.dedent(
""" #if (defined _MSC_VER)
# pragma warning(pop)
#endif
"""
))
<|reserved_special_token_0|>
class TypeInfoData(object):
    """Resolves and stores the type information required to generate
    shared-library test code for a single featurizer item.

    Exposes:
        CustomStructs               struct name -> OrderedDict(member name -> TypeInfo)
        ConfigurationParamTypeInfos TypeInfo per configuration parameter
        InputTypeInfo               TypeInfo for the item's input type
        OutputTypeInfo              TypeInfo for the item's output type
        DynamicOutputTypeInfo       TypeInfo for 'vector<output_type>'
    """

    def __init__(self, item, global_custom_structs, global_custom_enums):
        # Enums: global definitions first, then any declared on the item itself.
        custom_enums = OrderedDict()
        for enumeration in itertools.chain(global_custom_enums, getattr(item, 'custom_enums', [])):
            # A string underlying type still needs to be resolved to a TypeInfo.
            if isinstance(enumeration.underlying_type, six.string_types):
                underlying_info = self._CreateTypeInfo(enumeration.underlying_type)
                assert underlying_info, enumeration.underlying_type
                enumeration.underlying_type_info = underlying_info
            custom_enums[enumeration.name] = enumeration
        # Structs: map each struct name to an OrderedDict of member name -> TypeInfo.
        custom_structs = OrderedDict()
        for struct in itertools.chain(global_custom_structs, getattr(item, 'custom_structs', [])):
            member_infos = OrderedDict()
            for struct_member in struct.members:
                member_info = self._CreateTypeInfo(struct_member.type)
                assert member_info, struct_member.type
                assert struct_member.name not in member_infos, struct_member.name
                member_infos[struct_member.name] = member_info
            custom_structs[struct.name] = member_infos
        # Configuration params: enum-typed params reuse the enum's underlying info.
        configuration_param_type_infos = []
        for configuration_param in getattr(item, 'configuration_params', []):
            if configuration_param.type in custom_enums:
                param_info = custom_enums[configuration_param.type].underlying_type_info
                configuration_param.is_enum = True
            else:
                param_info = self._CreateTypeInfo(
                    configuration_param.type,
                    custom_structs=custom_structs,
                    custom_enums=custom_enums,
                )
            assert param_info, configuration_param.type
            configuration_param_type_infos.append(param_info)
        input_type_info = self._CreateTypeInfo(
            item.input_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert input_type_info, item.input_type
        output_type_info = self._CreateTypeInfo(
            item.output_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert output_type_info, item.output_type
        # Dynamic output is modeled as a vector of the item's output type.
        dynamic_output_info = self._CreateTypeInfo(
            'vector<{}>'.format(item.output_type),
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        self.CustomStructs = custom_structs
        self.ConfigurationParamTypeInfos = configuration_param_type_infos
        self.InputTypeInfo = input_type_info
        self.OutputTypeInfo = output_type_info
        self.DynamicOutputTypeInfo = dynamic_output_info

    @classmethod
    def EnumTypeInfoClasses(cls):
        """Yield every registered TypeInfo class."""
        cls._InitTypeInfoClasses()
        yield from cls._type_info_classes

    # Lazily-populated registry of TypeInfo classes (see _InitTypeInfoClasses).
    _type_info_classes = None

    @classmethod
    def _InitTypeInfoClasses(cls):
        """Populate the registry of TypeInfo classes on first use (idempotent)."""
        if cls._type_info_classes is not None:
            return
        from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo
        registry = [
            DatetimeTypeInfo,
            MatrixTypeInfo,
            SingleValueSparseVectorTypeInfo,
            SparseVectorTypeInfo,
            StringTypeInfo,
            TupleTypeInfo,
            UniqueIdTypeInfo,
            VectorTypeInfo,
        ]
        # Pick up every public "*TypeInfo" class exported by the compound modules.
        registry.extend(
            getattr(compound_module, attr_name)
            for compound_module in (ScalarTypeInfos, StructTypeInfos)
            for attr_name in dir(compound_module)
            if not attr_name.startswith('_')
            and attr_name.endswith('TypeInfo')
            and attr_name != 'TypeInfo'
        )
        cls._type_info_classes = registry

    @classmethod
    def _CreateTypeInfo(cls, the_type, *args, **kwargs):
        """Instantiate the TypeInfo class matching ``the_type``.

        A trailing '?' marks the type as optional. A class matches when its
        TypeName equals ``the_type`` (string) or matches it (regex-like object
        with a ``match`` method). Returns None when nothing matches.
        """
        cls._InitTypeInfoClasses()
        is_optional = the_type.endswith('?')
        if is_optional:
            the_type = the_type[:-1]
        matched_class = None
        for candidate in cls._type_info_classes:
            type_name = candidate.TypeName
            if isinstance(type_name, six.string_types):
                if type_name == the_type:
                    matched_class = candidate
                    break
            elif hasattr(type_name, 'match') and type_name.match(the_type):
                matched_class = candidate
                break
        if matched_class is None:
            return None
        return matched_class(
            *args,
            member_type=the_type,
            is_optional=is_optional,
            create_type_info_func=cls._CreateTypeInfo,
            **kwargs
        )
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Interface.staticderived
class Plugin(PluginBase):
    """Plugin that generates the shared-library import/export test sources."""

    Name = Interface.DerivedProperty('SharedLibraryTests')
    Description = Interface.DerivedProperty(
        'Generates code used when testing the Shared Library import/export layer'
    )

    @staticmethod
    @Interface.override
    def Generate(open_file_func, global_custom_structs, global_custom_enums,
        data, output_dir, status_stream):
        """Resolve type info for every item, emit the common files, then emit
        one test header per item group. Returns the first non-zero generator
        result (negative results abort immediately)."""
        result_code = 0
        # Resolve all type information up front so generation can fail fast.
        status_stream.write('Preprocessing data...')
        with status_stream.DoneManager():
            type_info_data = [
                [TypeInfoData(item, global_custom_structs, global_custom_enums) for item in items]
                for items in data
            ]
        status_stream.write('Generating Common Files...')
        with status_stream.DoneManager() as this_dm:
            this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)
            if this_dm.result != 0:
                return this_dm.result
        # Per-file generators; currently only the header generator.
        generators = [('Generating .h files...', _GenerateHeaderFile)]
        for desc, generator in generators:
            status_stream.write(desc)
            with status_stream.DoneManager(suffix='\n') as dm:
                num_groups = len(data)
                for index, (items, items_type_info_data) in enumerate(zip(data, type_info_data)):
                    dm.stream.write("Processing '{}' ({} of {})...".format(items[0].name, index + 1, num_groups))
                    with dm.stream.DoneManager() as this_dm:
                        this_dm.result = generator(open_file_func, output_dir, items, items_type_info_data, this_dm.stream)
                if dm.result < 0:
                    return dm.result
                result_code = result_code or dm.result
        return result_code
def _GenerateHeaderFile(open_file_func, output_dir, items,
    all_type_info_data, output_stream):
    """Write 'SharedLibraryTests_<name>.h' containing one templated C++ test
    function per item in ``items``.

    Each generated <name><suffix>Test() trains an estimator over its
    training_input, creates a transformer, runs inference over
    inference_input, and validates the collected results via verify_func.

    ``items`` and ``all_type_info_data`` are iterated in lockstep;
    ``output_stream`` is accepted but not used in this function.
    """
    with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.
        format(items[0].name)), 'w') as f:
        # File prolog: copyright banner, includes, MSVC warning suppression.
        f.write(textwrap.dedent(
            """ /* ---------------------------------------------------------------------- */
            /* Copyright (c) Microsoft Corporation. All rights reserved. */
            /* Licensed under the MIT License */
            /* ---------------------------------------------------------------------- */
            #pragma once
            #include "SharedLibrary_{name}.h"
            #include "Traits.h"
            #include "Featurizers/Structs.h"
            #include "SharedLibraryTests_Common.hpp"
            #if (defined _MSC_VER)
            # pragma warning(push)
            // I don't know why MSVC thinks that there is unreachable
            // code in these methods during release builds.
            # pragma warning(disable: 4702) // Unreachable code
            # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
            # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
            #endif
            """
            ).format(name=items[0].name))
        # Emit one test function per (item, type info) pair.
        for item, type_info_data in zip(items, all_type_info_data):
            template = getattr(item, 'template', None)
            if template:
                suffix = '_{}_'.format(template)
                type_desc = ' <{}>'.format(template)
                # NOTE(review): cpp_template_suffix is assigned but never read
                # in this function -- looks like a leftover; confirm before removing.
                cpp_template_suffix = '<{}>'.format(type_info_data.
                    InputTypeInfo.CppType)
            else:
                suffix = '_'
                type_desc = ''
                cpp_template_suffix = ''
            # Items with configuration params forward extra constructor args
            # through to CreateEstimator.
            if type_info_data.ConfigurationParamTypeInfos:
                constructor_template_params = ', typename... ConstructorArgTs'
                constructor_params = (
                    ',\n ConstructorArgTs &&... constructor_args')
                constructor_args = (
                    'std::forward<ConstructorArgTs>(constructor_args)..., ')
            else:
                constructor_template_params = ''
                constructor_params = ''
                constructor_args = ''
            fit_prefix_statements = ''
            transform_input_args = (type_info_data.InputTypeInfo.
                GetTransformInputArgs())
            # GetTransformInputArgs may also return setup statements that must
            # precede each Fit/Transform call.
            if isinstance(transform_input_args, tuple):
                transform_input_args, fit_prefix_statements = (
                    transform_input_args)
            # vector<bool> isn't actually a bool, so we can't take a direct
            # reference to its elements.
            if type_info_data.InputTypeInfo.TypeName == 'bool':
                for_loop = 'for(bool input : inference_input)'
            else:
                for_loop = 'for(auto const & input : inference_input)'
            # vector<bool> doesn't support emplace_back on some platforms.
            if type_info_data.OutputTypeInfo.TypeName == 'bool':
                invocation_template = 'results.push_back({});'
            else:
                invocation_template = 'results.emplace_back({});'
            # Output statement info drives how each result is appended/destroyed.
            if item.has_dynamic_output:
                output_statement_info = (type_info_data.
                    DynamicOutputTypeInfo.GetOutputInfo(invocation_template
                    =invocation_template, result_name='results'))
            else:
                output_statement_info = (type_info_data.OutputTypeInfo.
                    GetOutputInfo(invocation_template=invocation_template,
                    result_name='results'))
            # Training phase: create the estimator, Fit over the training data
            # until training completes, then build the transformer from it.
            f.write(textwrap.dedent(
                """ /* ---------------------------------------------------------------------- */
                /* | {name}{type_desc} */
                template <typename VectorInputT{constructor_template_params}>
                void {name}{suffix}Test(
                    std::vector<VectorInputT> const &training_input,
                    std::vector<VectorInputT> const &inference_input,
                    std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}
                ) {{
                    ErrorInfoHandle * pErrorInfo(nullptr);
                    // Create the estimator
                    {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);
                    REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));
                    REQUIRE(pEstimatorHandle != nullptr);
                    REQUIRE(pErrorInfo == nullptr);
                    // Train
                    if(training_input.empty() == false) {{
                        typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());
                        while(true) {{
                            TrainingState trainingState(0);
                            REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));
                            REQUIRE(pErrorInfo == nullptr);
                            if(trainingState != Training)
                                break;
                            FitResult result(0);
                            auto const & input(*iter);
                            {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));
                            REQUIRE(pErrorInfo == nullptr);
                            if(result == ResetAndContinue) {{
                                iter = training_input.begin();
                                continue;
                            }}
                            ++iter;
                            if(iter == training_input.end()) {{
                                REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));
                                REQUIRE(pErrorInfo == nullptr);
                                iter = training_input.begin();
                            }}
                        }}
                    }}
                    {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);
                    REQUIRE(pErrorInfo == nullptr);
                    // Once here, training should be complete
                    {{
                        bool is_complete(false);
                        REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));
                        REQUIRE(pErrorInfo == nullptr);
                        REQUIRE(is_complete);
                    }}
                    // Create the Transformer
                    {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);
                    REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));
                    REQUIRE(pTransformerHandle != nullptr);
                    REQUIRE(pErrorInfo == nullptr);
                    // Destroy the estimator
                    REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));
                    REQUIRE(pErrorInfo == nullptr);
                """
                ).format(name=item.name, type_desc=type_desc, suffix=suffix,
                vector_result_type=output_statement_info.VectorResultType,
                constructor_template_params=constructor_template_params,
                constructor_params=constructor_params, constructor_args=
                constructor_args, fit_input_args=transform_input_args,
                fit_prefix_statements='' if not fit_prefix_statements else
                """{}
            """.format(StringHelpers.LeftJustify(
                fit_prefix_statements.rstrip(), 12))))
            # Transformed-data destroy statements: either inline (after each
            # Transform call) or trailing (after all results are verified).
            inline_destroy_statement = '// No inline destroy statement'
            trailing_destroy_statement = '// No trailing destroy statement'
            if output_statement_info.DestroyArgs:
                if output_statement_info.DestroyInline:
                    inline_destroy_statement = textwrap.dedent(
                        """
                        // Destroy the contents
                        REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
                        REQUIRE(pErrorInfo == nullptr);
                        """
                        ).format(name=item.name, suffix=suffix, args=
                        output_statement_info.DestroyArgs)
                else:
                    trailing_destroy_statement = textwrap.dedent(
                        """ for(auto & {var_name}: results) {{
                            REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
                            REQUIRE(pErrorInfo == nullptr);
                        }}
                        """
                        ).format(name=item.name, suffix=suffix, args=
                        output_statement_info.DestroyArgs, var_name=
                        output_statement_info.DestroyVarName or 'result')
            if item.has_dynamic_output:
                # Dynamic output: results may arrive on any Transform call, and
                # Flush() must be called once the input is exhausted.
                f.write(StringHelpers.LeftJustify(textwrap.dedent(
                    """ // Inference
                    std::vector<{vector_result_type}> results;
                    {for_loop} {{
                        {transform_prefix_statements}{transform_vars}
                        REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
                        REQUIRE(pErrorInfo == nullptr);
                        {transform_statement}
                        {inline_destroy_statement}
                    }}
                    if(true) {{
                        {transform_vars}
                        REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));
                        REQUIRE(pErrorInfo == nullptr);
                        {transform_statement}
                        {inline_destroy_statement}
                    }}
                    """
                    ).format(name=item.name, suffix=suffix,
                    vector_result_type=output_statement_info.
                    VectorResultType, for_loop=for_loop,
                    transform_prefix_statements='' if not
                    fit_prefix_statements else '{}\n\n '.format(
                    StringHelpers.LeftJustify(fit_prefix_statements, 4).
                    rstrip()), transform_vars=StringHelpers.LeftJustify(
                    '\n'.join(['{} {};'.format(var.Type, var.Name) for var in
                    output_statement_info.TransformVars]), 4),
                    transform_input_args=transform_input_args,
                    transform_output_args=', '.join(['&{}'.format(p.Name) for
                    p in output_statement_info.TransformVars]),
                    transform_statement=StringHelpers.LeftJustify(
                    output_statement_info.AppendResultStatement.rstrip(), 4
                    ), inline_destroy_statement=StringHelpers.LeftJustify(
                    inline_destroy_statement.rstrip(), 4)), 4,
                    skip_first_line=False))
            else:
                # Fixed-size output: exactly one result per input, so the
                # results vector can be reserved up front; no Flush needed.
                f.write(StringHelpers.LeftJustify(textwrap.dedent(
                    """ // Inference
                    std::vector<{vector_result_type}> results;
                    results.reserve(inference_input.size());
                    {for_loop} {{
                        {transform_prefix_statements}{transform_vars}
                        REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
                        REQUIRE(pErrorInfo == nullptr);
                        {transform_statement}
                        {inline_destroy_statement}
                    }}
                    """
                    ).format(name=item.name, suffix=suffix,
                    vector_result_type=output_statement_info.
                    VectorResultType, for_loop=for_loop,
                    transform_prefix_statements='' if not
                    fit_prefix_statements else '{}\n\n '.format(
                    StringHelpers.LeftJustify(fit_prefix_statements, 4).
                    rstrip()), transform_vars=StringHelpers.LeftJustify(
                    '\n'.join(['{} {};'.format(var.Type, var.Name) for var in
                    output_statement_info.TransformVars]), 4),
                    transform_input_args=transform_input_args,
                    transform_output_args=', '.join(['&{}'.format(p.Name) for
                    p in output_statement_info.TransformVars]),
                    transform_statement=StringHelpers.LeftJustify(
                    output_statement_info.AppendResultStatement.rstrip(), 4
                    ), inline_destroy_statement=StringHelpers.LeftJustify(
                    inline_destroy_statement.rstrip(), 4)), 4,
                    skip_first_line=False))
            # Epilog of the generated function: verify results, run trailing
            # destroys, and destroy the transformer.
            f.write(textwrap.dedent(
                """
                    REQUIRE(verify_func(results));
                    {trailing_destroy_statement}
                    // Destroy the transformer
                    REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));
                    REQUIRE(pErrorInfo == nullptr);
                }}
                """
                ).format(name=item.name, suffix=suffix,
                trailing_destroy_statement=StringHelpers.LeftJustify(
                trailing_destroy_statement.rstrip(), 4)))
        # File epilog: restore the MSVC warning state pushed in the prolog.
        f.write(textwrap.dedent(
            """ #if (defined _MSC_VER)
            # pragma warning(pop)
            #endif
            """
            ))
def _GenerateCommonFiles(open_file_func, output_dir, output_stream):
    """Write 'SharedLibraryTests_Common.hpp', the helpers shared by every
    generated test header.

    The body of the file is produced by each registered TypeInfo class's
    CreateHelperMethods(). Always returns 0; ``output_stream`` is accepted
    but not used in this function.
    """
    with open_file_func(os.path.join(output_dir,
        'SharedLibraryTests_Common.hpp'), 'w') as f:
        # Prolog: copyright banner, base include, MSVC warning suppression.
        f.write(textwrap.dedent(
            """ /* ---------------------------------------------------------------------- */
            /* Copyright (c) Microsoft Corporation. All rights reserved. */
            /* Licensed under the MIT License */
            /* ---------------------------------------------------------------------- */
            #pragma once
            #include "SharedLibrary_Common.hpp"
            #if (defined _MSC_VER)
            # pragma warning(push)
            // I don't know why MSVC thinks that there is unreachable
            // code in these methods during release builds.
            # pragma warning(disable: 4702) // Unreachable code
            # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
            # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
            #endif
            """
            ))
        # Each registered TypeInfo class contributes its own helper methods.
        for type_info_class in TypeInfoData.EnumTypeInfoClasses():
            type_info_class.CreateHelperMethods(f)
        # Epilog: restore the MSVC warning state.
        f.write(textwrap.dedent(
            """ #if (defined _MSC_VER)
            # pragma warning(pop)
            #endif
            """
            ))
    return 0
class TypeInfoData(object):
    """Resolves and stores the type information required to generate
    shared-library test code for a single featurizer item.
    """
    def __init__(self, item, global_custom_structs, global_custom_enums):
        # Enums: global definitions first, then any declared on the item itself.
        custom_enums = OrderedDict()
        for custom_enum in itertools.chain(global_custom_enums, getattr(
            item, 'custom_enums', [])):
            # A string underlying type still needs resolving to a TypeInfo.
            if isinstance(custom_enum.underlying_type, six.string_types):
                type_info = self._CreateTypeInfo(custom_enum.underlying_type)
                assert type_info, custom_enum.underlying_type
                custom_enum.underlying_type_info = type_info
            custom_enums[custom_enum.name] = custom_enum
        # Structs: map each struct name to an OrderedDict of member name -> TypeInfo.
        custom_structs = OrderedDict()
        for custom_struct in itertools.chain(global_custom_structs, getattr
            (item, 'custom_structs', [])):
            members = OrderedDict()
            for member in custom_struct.members:
                type_info = self._CreateTypeInfo(member.type)
                assert type_info, member.type
                assert member.name not in members, member.name
                members[member.name] = type_info
            custom_structs[custom_struct.name] = members
        # Configuration params: enum-typed params reuse the enum's underlying info.
        configuration_param_type_infos = []
        for configuration_param in getattr(item, 'configuration_params', []):
            if configuration_param.type in custom_enums:
                type_info = custom_enums[configuration_param.type
                    ].underlying_type_info
                configuration_param.is_enum = True
            else:
                type_info = self._CreateTypeInfo(configuration_param.type,
                    custom_structs=custom_structs, custom_enums=custom_enums)
            assert type_info, configuration_param.type
            configuration_param_type_infos.append(type_info)
        input_type_info = self._CreateTypeInfo(item.input_type,
            custom_structs=custom_structs, custom_enums=custom_enums)
        assert input_type_info, item.input_type
        output_type_info = self._CreateTypeInfo(item.output_type,
            custom_structs=custom_structs, custom_enums=custom_enums)
        assert output_type_info, item.output_type
        # Dynamic output is modeled as a vector of the item's output type.
        dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item
            .output_type), custom_structs=custom_structs, custom_enums=
            custom_enums)
        self.CustomStructs = custom_structs
        self.ConfigurationParamTypeInfos = configuration_param_type_infos
        self.InputTypeInfo = input_type_info
        self.OutputTypeInfo = output_type_info
        self.DynamicOutputTypeInfo = dynamic_output_info
    @classmethod
    def EnumTypeInfoClasses(cls):
        """Yield every registered TypeInfo class."""
        cls._InitTypeInfoClasses()
        yield from cls._type_info_classes
    # Lazily-populated registry of TypeInfo classes (see _InitTypeInfoClasses).
    _type_info_classes = None
    @classmethod
    def _InitTypeInfoClasses(cls):
        """Populate the registry of TypeInfo classes on first use (idempotent)."""
        if cls._type_info_classes is not None:
            return
        from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo
        type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,
            SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,
            StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]
        # Pick up every public "*TypeInfo" class exported by the compound modules.
        for compound_module in [ScalarTypeInfos, StructTypeInfos]:
            for obj_name in dir(compound_module):
                if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'
                    ) or obj_name == 'TypeInfo':
                    continue
                type_info_classes.append(getattr(compound_module, obj_name))
        cls._type_info_classes = type_info_classes
    @classmethod
    def _CreateTypeInfo(cls, the_type, *args, **kwargs):
        """Instantiate the TypeInfo class matching ``the_type``.

        A trailing '?' marks the type as optional. Returns None when no
        registered class matches.
        """
        cls._InitTypeInfoClasses()
        is_optional = False
        if the_type.endswith('?'):
            the_type = the_type[:-1]
            is_optional = True
        type_info_class = None
        for this_type_info_class in cls._type_info_classes:
            # TypeName may be a literal string or a regex-like object.
            if isinstance(this_type_info_class.TypeName, six.string_types):
                if this_type_info_class.TypeName == the_type:
                    type_info_class = this_type_info_class
                    break
            elif hasattr(this_type_info_class.TypeName, 'match'):
                if this_type_info_class.TypeName.match(the_type):
                    type_info_class = this_type_info_class
                    break
        if type_info_class is None:
            return None
        return type_info_class(*args, member_type=the_type, is_optional=
            is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)
<|reserved_special_token_1|>
# ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import itertools
import os
import sys
import textwrap
from collections import OrderedDict
import six
import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import StringHelpers
from CommonEnvironment import Interface
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
sys.path.insert(0, os.path.join(_script_dir, ".."))
with CallOnExit(lambda: sys.path.pop(0)):
from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase
# ----------------------------------------------------------------------
@Interface.staticderived
class Plugin(PluginBase):
    """Plugin that generates the shared-library import/export test sources."""

    # ----------------------------------------------------------------------
    # |  Properties
    Name = Interface.DerivedProperty("SharedLibraryTests")
    Description = Interface.DerivedProperty(
        "Generates code used when testing the Shared Library import/export layer",
    )

    # ----------------------------------------------------------------------
    # |  Methods
    @staticmethod
    @Interface.override
    def Generate(
        open_file_func,
        global_custom_structs,
        global_custom_enums,
        data,
        output_dir,
        status_stream,
    ):
        """Resolve type info for every item, emit the common files, then emit
        one test header per item group.

        Returns the first non-zero generator result; a negative result aborts
        generation immediately.
        """
        result_code = 0

        # Resolve all type information up front so generation can fail fast.
        status_stream.write("Preprocessing data...")
        with status_stream.DoneManager():
            type_info_data = []
            for items in data:
                infos = [
                    TypeInfoData(item, global_custom_structs, global_custom_enums)
                    for item in items
                ]
                type_info_data.append(infos)

        status_stream.write("Generating Common Files...")
        with status_stream.DoneManager() as this_dm:
            this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)
            if this_dm.result != 0:
                return this_dm.result

        # Per-file generators; currently only the header generator.
        for desc, func in (("Generating .h files...", _GenerateHeaderFile),):
            status_stream.write(desc)

            with status_stream.DoneManager(suffix="\n") as dm:
                num_groups = len(data)

                for index, (items, items_type_info_data) in enumerate(zip(data, type_info_data)):
                    dm.stream.write(
                        "Processing '{}' ({} of {})...".format(items[0].name, index + 1, num_groups),
                    )
                    with dm.stream.DoneManager() as this_dm:
                        this_dm.result = func(
                            open_file_func,
                            output_dir,
                            items,
                            items_type_info_data,
                            this_dm.stream,
                        )

                if dm.result < 0:
                    return dm.result

                result_code = result_code or dm.result

        return result_code
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream):
with open_file_func(
os.path.join(output_dir, "SharedLibraryTests_{}.h".format(items[0].name)),
"w",
) as f:
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* Licensed under the MIT License */
/* ---------------------------------------------------------------------- */
#pragma once
#include "SharedLibrary_{name}.h"
#include "Traits.h"
#include "Featurizers/Structs.h"
#include "SharedLibraryTests_Common.hpp"
#if (defined _MSC_VER)
# pragma warning(push)
// I don't know why MSVC thinks that there is unreachable
// code in these methods during release builds.
# pragma warning(disable: 4702) // Unreachable code
# pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
# pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
#endif
""",
).format(
name=items[0].name,
),
)
for item, type_info_data in zip(items, all_type_info_data):
template = getattr(item, "template", None)
if template:
suffix = "_{}_".format(template)
type_desc = " <{}>".format(template)
cpp_template_suffix = "<{}>".format(
type_info_data.InputTypeInfo.CppType,
)
else:
suffix = "_"
type_desc = ""
cpp_template_suffix = ""
if type_info_data.ConfigurationParamTypeInfos:
constructor_template_params = ", typename... ConstructorArgTs"
constructor_params = ",\n ConstructorArgTs &&... constructor_args"
constructor_args = "std::forward<ConstructorArgTs>(constructor_args)..., "
else:
constructor_template_params = ""
constructor_params = ""
constructor_args = ""
fit_prefix_statements = ""
transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs()
if isinstance(transform_input_args, tuple):
transform_input_args, fit_prefix_statements = transform_input_args
# Special processing for vector<bool>
if type_info_data.InputTypeInfo.TypeName == "bool":
# vector<bool> isn't actually a bool, so we can't take a direct reference to it
for_loop = "for(bool input : inference_input)"
else:
for_loop = "for(auto const & input : inference_input)"
if type_info_data.OutputTypeInfo.TypeName == "bool":
# vector<bool> doesn't support emplace_back on some platforms
invocation_template = "results.push_back({});"
else:
invocation_template = "results.emplace_back({});"
# Get the output statement information
if item.has_dynamic_output:
output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
else:
output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo(
invocation_template=invocation_template,
result_name="results",
)
# Write the training statements
f.write(
textwrap.dedent(
"""\
/* ---------------------------------------------------------------------- */
/* | {name}{type_desc} */
template <typename VectorInputT{constructor_template_params}>
void {name}{suffix}Test(
std::vector<VectorInputT> const &training_input,
std::vector<VectorInputT> const &inference_input,
std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}
) {{
ErrorInfoHandle * pErrorInfo(nullptr);
// Create the estimator
{name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);
REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));
REQUIRE(pEstimatorHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Train
if(training_input.empty() == false) {{
typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());
while(true) {{
TrainingState trainingState(0);
REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(trainingState != Training)
break;
FitResult result(0);
auto const & input(*iter);
{fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
if(result == ResetAndContinue) {{
iter = training_input.begin();
continue;
}}
++iter;
if(iter == training_input.end()) {{
REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
iter = training_input.begin();
}}
}}
}}
{name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);
REQUIRE(pErrorInfo == nullptr);
// Once here, training should be complete
{{
bool is_complete(false);
REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
REQUIRE(is_complete);
}}
// Create the Transformer
{name}{suffix}TransformerHandle * pTransformerHandle(nullptr);
REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));
REQUIRE(pTransformerHandle != nullptr);
REQUIRE(pErrorInfo == nullptr);
// Destroy the estimator
REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
type_desc=type_desc,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
constructor_template_params=constructor_template_params,
constructor_params=constructor_params,
constructor_args=constructor_args,
fit_input_args=transform_input_args,
fit_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements.rstrip(),
12,
),
),
),
)
# Write the inferencing statements
inline_destroy_statement = "// No inline destroy statement"
trailing_destroy_statement = "// No trailing destroy statement"
if output_statement_info.DestroyArgs:
if output_statement_info.DestroyInline:
inline_destroy_statement = textwrap.dedent(
"""\
// Destroy the contents
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
)
else:
trailing_destroy_statement = textwrap.dedent(
"""\
for(auto & {var_name}: results) {{
REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
args=output_statement_info.DestroyArgs,
var_name=output_statement_info.DestroyVarName or "result",
)
if item.has_dynamic_output:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
if(true) {{
{transform_vars}
REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
else:
f.write(
StringHelpers.LeftJustify(
textwrap.dedent(
"""\
// Inference
std::vector<{vector_result_type}> results;
results.reserve(inference_input.size());
{for_loop} {{
{transform_prefix_statements}{transform_vars}
REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
{transform_statement}
{inline_destroy_statement}
}}
""",
).format(
name=item.name,
suffix=suffix,
vector_result_type=output_statement_info.VectorResultType,
for_loop=for_loop,
transform_prefix_statements="" if not fit_prefix_statements else "{}\n\n ".format(
StringHelpers.LeftJustify(
fit_prefix_statements,
4,
).rstrip(),
),
transform_vars=StringHelpers.LeftJustify(
"\n".join(
[
"{} {};".format(var.Type, var.Name)
for var in output_statement_info.TransformVars
]
),
4,
),
transform_input_args=transform_input_args,
transform_output_args=", ".join(["&{}".format(p.Name) for p in output_statement_info.TransformVars]),
transform_statement=StringHelpers.LeftJustify(
output_statement_info.AppendResultStatement.rstrip(),
4,
),
inline_destroy_statement=StringHelpers.LeftJustify(
inline_destroy_statement.rstrip(),
4,
),
),
4,
skip_first_line=False,
),
)
f.write(
textwrap.dedent(
"""\
REQUIRE(verify_func(results));
{trailing_destroy_statement}
// Destroy the transformer
REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));
REQUIRE(pErrorInfo == nullptr);
}}
""",
).format(
name=item.name,
suffix=suffix,
trailing_destroy_statement=StringHelpers.LeftJustify(
trailing_destroy_statement.rstrip(),
4,
),
),
)
f.write(
textwrap.dedent(
"""\
#if (defined _MSC_VER)
# pragma warning(pop)
#endif
""",
),
)
# ----------------------------------------------------------------------
def _GenerateCommonFiles(open_file_func, output_dir, output_stream):
    """Generates 'SharedLibraryTests_Common.hpp' in `output_dir`.

    The generated header carries the helper methods shared by every
    per-item shared-library test header.  `open_file_func` is invoked as
    `open_file_func(filename, mode)` and must return a context manager
    yielding a writable file-like object.  `output_stream` is not used by
    this function's body.

    Returns:
        0, so the value can be used directly as a result code by callers.
    """
    with open_file_func(
        os.path.join(output_dir, "SharedLibraryTests_Common.hpp"),
        "w",
    ) as f:
        # Fixed prologue: copyright banner, include of the shared library
        # header, and MSVC-only warning suppression for the generated code.
        f.write(
            textwrap.dedent(
                """\
                /* ---------------------------------------------------------------------- */
                /* Copyright (c) Microsoft Corporation. All rights reserved. */
                /* Licensed under the MIT License */
                /* ---------------------------------------------------------------------- */
                #pragma once
                #include "SharedLibrary_Common.hpp"
                #if (defined _MSC_VER)
                # pragma warning(push)
                // I don't know why MSVC thinks that there is unreachable
                // code in these methods during release builds.
                # pragma warning(disable: 4702) // Unreachable code
                # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used
                # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used
                #endif
                """,
            ),
        )
        # Each registered type info class contributes its own helper methods
        # to the common header.
        for type_info_class in TypeInfoData.EnumTypeInfoClasses():
            type_info_class.CreateHelperMethods(f)
        # Epilogue: restore the MSVC warning state pushed in the prologue.
        f.write(
            textwrap.dedent(
                """\
                #if (defined _MSC_VER)
                # pragma warning(pop)
                #endif
                """,
            ),
        )
    return 0
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class TypeInfoData(object):
    """Resolves and caches the type information needed to generate tests for one item.

    Construction resolves the item's custom enums, custom structs,
    configuration parameters, input type, output type, and the dynamic
    ("vector<output>") variant of the output type, committing the results
    as public attributes.
    """

    # ----------------------------------------------------------------------
    # |
    # |  Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(self, item, global_custom_structs, global_custom_enums):
        # Custom enums: global definitions first, then any declared on the item.
        custom_enums = OrderedDict()
        for enum_definition in itertools.chain(global_custom_enums, getattr(item, "custom_enums", [])):
            if isinstance(enum_definition.underlying_type, six.string_types):
                # The underlying type is still a type name; resolve it in place.
                underlying_type_info = self._CreateTypeInfo(enum_definition.underlying_type)
                assert underlying_type_info, enum_definition.underlying_type

                enum_definition.underlying_type_info = underlying_type_info

            custom_enums[enum_definition.name] = enum_definition

        # Custom structs: struct name -> OrderedDict of member name -> type info.
        custom_structs = OrderedDict()
        for struct_definition in itertools.chain(global_custom_structs, getattr(item, "custom_structs", [])):
            member_infos = OrderedDict()
            for member in struct_definition.members:
                member_type_info = self._CreateTypeInfo(member.type)
                assert member_type_info, member.type

                assert member.name not in member_infos, member.name
                member_infos[member.name] = member_type_info

            custom_structs[struct_definition.name] = member_infos

        # Configuration parameters: enum-typed params reuse the enum's
        # underlying type info (and are flagged as enums on the param itself).
        configuration_param_type_infos = []
        for configuration_param in getattr(item, "configuration_params", []):
            if configuration_param.type in custom_enums:
                param_type_info = custom_enums[configuration_param.type].underlying_type_info
                configuration_param.is_enum = True
            else:
                param_type_info = self._CreateTypeInfo(
                    configuration_param.type,
                    custom_structs=custom_structs,
                    custom_enums=custom_enums,
                )
                assert param_type_info, configuration_param.type

            configuration_param_type_infos.append(param_type_info)

        input_type_info = self._CreateTypeInfo(
            item.input_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert input_type_info, item.input_type

        output_type_info = self._CreateTypeInfo(
            item.output_type,
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )
        assert output_type_info, item.output_type

        # "vector<output_type>" variant, consumed by items with dynamic output.
        dynamic_output_info = self._CreateTypeInfo(
            "vector<{}>".format(item.output_type),
            custom_structs=custom_structs,
            custom_enums=custom_enums,
        )

        # Commit the results
        self.CustomStructs = custom_structs
        self.ConfigurationParamTypeInfos = configuration_param_type_infos
        self.InputTypeInfo = input_type_info
        self.OutputTypeInfo = output_type_info
        self.DynamicOutputTypeInfo = dynamic_output_info

    # ----------------------------------------------------------------------
    @classmethod
    def EnumTypeInfoClasses(cls):
        """Yields every registered type info class, initializing the registry on first use."""
        cls._InitTypeInfoClasses()

        for type_info_class in cls._type_info_classes:
            yield type_info_class

    # ----------------------------------------------------------------------
    # |
    # |  Private Data
    # |
    # ----------------------------------------------------------------------

    # Lazily populated by `_InitTypeInfoClasses`; shared by all instances.
    _type_info_classes = None

    # ----------------------------------------------------------------------
    # |
    # |  Private Methods
    # |
    # ----------------------------------------------------------------------
    @classmethod
    def _InitTypeInfoClasses(cls):
        """Populates `_type_info_classes` exactly once (idempotent)."""
        if cls._type_info_classes is not None:
            return

        from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos
        from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo
        from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo

        type_info_classes = [
            DatetimeTypeInfo,
            MatrixTypeInfo,
            SingleValueSparseVectorTypeInfo,
            SparseVectorTypeInfo,
            StringTypeInfo,
            TupleTypeInfo,
            UniqueIdTypeInfo,
            VectorTypeInfo,
        ]

        # Pick up every "...TypeInfo" attribute exported by the compound
        # modules, skipping private names and the "TypeInfo" base itself.
        for compound_module in (ScalarTypeInfos, StructTypeInfos):
            for attribute_name in dir(compound_module):
                if attribute_name.startswith("_"):
                    continue
                if not attribute_name.endswith("TypeInfo") or attribute_name == "TypeInfo":
                    continue

                type_info_classes.append(getattr(compound_module, attribute_name))

        # Associate the type infos with the class rather than the instance
        # so that we only need to perform this initialization once.
        cls._type_info_classes = type_info_classes

    # ----------------------------------------------------------------------
    @classmethod
    def _CreateTypeInfo(cls, the_type, *args, **kwargs):
        """Returns a type info instance for `the_type`, or None when no class matches.

        A trailing '?' marks the type as optional and is stripped before
        matching.  Each candidate class exposes `TypeName`, which is either a
        plain string (exact match) or an object with a `match` method.
        """
        cls._InitTypeInfoClasses()

        is_optional = the_type.endswith("?")
        if is_optional:
            the_type = the_type[:-1]

        matching_class = None
        for candidate in cls._type_info_classes:
            candidate_name = candidate.TypeName
            if isinstance(candidate_name, six.string_types):
                if candidate_name == the_type:
                    matching_class = candidate
                    break
            elif hasattr(candidate_name, "match") and candidate_name.match(the_type):
                matching_class = candidate
                break

        if matching_class is None:
            return None

        return matching_class(
            *args,
            member_type=the_type,
            is_optional=is_optional,
            create_type_info_func=cls._CreateTypeInfo,
            **kwargs
        )
|
flexible
|
{
"blob_id": "d8befc4a79176aefcccd3dceddf04ca965601e5c",
"index": 2856,
"step-1": "<mask token>\n\n\n@Interface.staticderived\nclass Plugin(PluginBase):\n <mask token>\n <mask token>\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n 
members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from 
Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n",
"step-2": "<mask token>\n\n\n@Interface.staticderived\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = 
custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from 
Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, 
create_type_info_func=cls._CreateTypeInfo, **kwargs)\n",
"step-3": "<mask token>\n\n\n@Interface.staticderived\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\ndef _GenerateHeaderFile(open_file_func, output_dir, items,\n all_type_info_data, output_stream):\n with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.\n format(items[0].name)), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_{name}.h\"\n\n #include \"Traits.h\"\n #include \"Featurizers/Structs.h\"\n\n #include \"SharedLibraryTests_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n\n \"\"\"\n ).format(name=items[0].name))\n for item, type_info_data in zip(items, all_type_info_data):\n template = getattr(item, 'template', None)\n if template:\n suffix = '_{}_'.format(template)\n type_desc = ' <{}>'.format(template)\n cpp_template_suffix = '<{}>'.format(type_info_data.\n InputTypeInfo.CppType)\n else:\n suffix = '_'\n type_desc = ''\n cpp_template_suffix = ''\n if type_info_data.ConfigurationParamTypeInfos:\n constructor_template_params = ', typename... ConstructorArgTs'\n constructor_params = (\n ',\\n ConstructorArgTs &&... 
constructor_args')\n constructor_args = (\n 'std::forward<ConstructorArgTs>(constructor_args)..., ')\n else:\n constructor_template_params = ''\n constructor_params = ''\n constructor_args = ''\n fit_prefix_statements = ''\n transform_input_args = (type_info_data.InputTypeInfo.\n GetTransformInputArgs())\n if isinstance(transform_input_args, tuple):\n transform_input_args, fit_prefix_statements = (\n transform_input_args)\n if type_info_data.InputTypeInfo.TypeName == 'bool':\n for_loop = 'for(bool input : inference_input)'\n else:\n for_loop = 'for(auto const & input : inference_input)'\n if type_info_data.OutputTypeInfo.TypeName == 'bool':\n invocation_template = 'results.push_back({});'\n else:\n invocation_template = 'results.emplace_back({});'\n if item.has_dynamic_output:\n output_statement_info = (type_info_data.\n DynamicOutputTypeInfo.GetOutputInfo(invocation_template\n =invocation_template, result_name='results'))\n else:\n output_statement_info = (type_info_data.OutputTypeInfo.\n GetOutputInfo(invocation_template=invocation_template,\n result_name='results'))\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* | {name}{type_desc} */\n template <typename VectorInputT{constructor_template_params}>\n void {name}{suffix}Test(\n std::vector<VectorInputT> const &training_input,\n std::vector<VectorInputT> const &inference_input,\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\n ) {{\n ErrorInfoHandle * pErrorInfo(nullptr);\n\n // Create the estimator\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\n REQUIRE(pEstimatorHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Train\n if(training_input.empty() == false) {{\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\n\n while(true) {{\n 
TrainingState trainingState(0);\n\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(trainingState != Training)\n break;\n\n FitResult result(0);\n auto const & input(*iter);\n\n {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(result == ResetAndContinue) {{\n iter = training_input.begin();\n continue;\n }}\n\n ++iter;\n if(iter == training_input.end()) {{\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n iter = training_input.begin();\n }}\n }}\n }}\n\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\n REQUIRE(pErrorInfo == nullptr);\n\n // Once here, training should be complete\n {{\n bool is_complete(false);\n\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n REQUIRE(is_complete);\n }}\n\n // Create the Transformer\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\n REQUIRE(pTransformerHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Destroy the estimator\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n \"\"\"\n ).format(name=item.name, type_desc=type_desc, suffix=suffix,\n vector_result_type=output_statement_info.VectorResultType,\n constructor_template_params=constructor_template_params,\n constructor_params=constructor_params, constructor_args=\n constructor_args, fit_input_args=transform_input_args,\n fit_prefix_statements='' if not fit_prefix_statements else\n \"\"\"{}\n\n \"\"\".format(StringHelpers.LeftJustify(\n fit_prefix_statements.rstrip(), 12))))\n inline_destroy_statement = '// No inline destroy statement'\n 
trailing_destroy_statement = '// No trailing destroy statement'\n if output_statement_info.DestroyArgs:\n if output_statement_info.DestroyInline:\n inline_destroy_statement = textwrap.dedent(\n \"\"\"\n // Destroy the contents\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs)\n else:\n trailing_destroy_statement = textwrap.dedent(\n \"\"\" for(auto & {var_name}: results) {{\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs, var_name=\n output_statement_info.DestroyVarName or 'result')\n if item.has_dynamic_output:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n\n if(true) {{\n {transform_vars}\n\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in 
output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n else:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n results.reserve(inference_input.size());\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n f.write(textwrap.dedent(\n \"\"\"\n REQUIRE(verify_func(results));\n\n {trailing_destroy_statement}\n\n // Destroy the transformer\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n trailing_destroy_statement=StringHelpers.LeftJustify(\n trailing_destroy_statement.rstrip(), 4)))\n f.write(textwrap.dedent(\n \"\"\" #if 
(defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n\n\n<mask token>\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = 
custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n 
type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n",
"step-4": "<mask token>\n\n\n@Interface.staticderived\nclass Plugin(PluginBase):\n Name = Interface.DerivedProperty('SharedLibraryTests')\n Description = Interface.DerivedProperty(\n 'Generates code used when testing the Shared Library import/export layer'\n )\n\n @staticmethod\n @Interface.override\n def Generate(open_file_func, global_custom_structs, global_custom_enums,\n data, output_dir, status_stream):\n result_code = 0\n status_stream.write('Preprocessing data...')\n with status_stream.DoneManager():\n type_info_data = []\n for items in data:\n type_info_data.append([TypeInfoData(item,\n global_custom_structs, global_custom_enums) for item in\n items])\n status_stream.write('Generating Common Files...')\n with status_stream.DoneManager() as this_dm:\n this_dm.result = _GenerateCommonFiles(open_file_func,\n output_dir, this_dm.stream)\n if this_dm.result != 0:\n return this_dm.result\n for desc, func in [('Generating .h files...', _GenerateHeaderFile)]:\n status_stream.write(desc)\n with status_stream.DoneManager(suffix='\\n') as dm:\n for index, (items, items_type_info_data) in enumerate(zip(\n data, type_info_data)):\n dm.stream.write(\"Processing '{}' ({} of {})...\".format(\n items[0].name, index + 1, len(data)))\n with dm.stream.DoneManager() as this_dm:\n this_dm.result = func(open_file_func, output_dir,\n items, items_type_info_data, this_dm.stream)\n if dm.result < 0:\n return dm.result\n result_code = result_code or dm.result\n return result_code\n\n\ndef _GenerateHeaderFile(open_file_func, output_dir, items,\n all_type_info_data, output_stream):\n with open_file_func(os.path.join(output_dir, 'SharedLibraryTests_{}.h'.\n format(items[0].name)), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_{name}.h\"\n\n #include \"Traits.h\"\n #include \"Featurizers/Structs.h\"\n\n #include \"SharedLibraryTests_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n\n \"\"\"\n ).format(name=items[0].name))\n for item, type_info_data in zip(items, all_type_info_data):\n template = getattr(item, 'template', None)\n if template:\n suffix = '_{}_'.format(template)\n type_desc = ' <{}>'.format(template)\n cpp_template_suffix = '<{}>'.format(type_info_data.\n InputTypeInfo.CppType)\n else:\n suffix = '_'\n type_desc = ''\n cpp_template_suffix = ''\n if type_info_data.ConfigurationParamTypeInfos:\n constructor_template_params = ', typename... ConstructorArgTs'\n constructor_params = (\n ',\\n ConstructorArgTs &&... 
constructor_args')\n constructor_args = (\n 'std::forward<ConstructorArgTs>(constructor_args)..., ')\n else:\n constructor_template_params = ''\n constructor_params = ''\n constructor_args = ''\n fit_prefix_statements = ''\n transform_input_args = (type_info_data.InputTypeInfo.\n GetTransformInputArgs())\n if isinstance(transform_input_args, tuple):\n transform_input_args, fit_prefix_statements = (\n transform_input_args)\n if type_info_data.InputTypeInfo.TypeName == 'bool':\n for_loop = 'for(bool input : inference_input)'\n else:\n for_loop = 'for(auto const & input : inference_input)'\n if type_info_data.OutputTypeInfo.TypeName == 'bool':\n invocation_template = 'results.push_back({});'\n else:\n invocation_template = 'results.emplace_back({});'\n if item.has_dynamic_output:\n output_statement_info = (type_info_data.\n DynamicOutputTypeInfo.GetOutputInfo(invocation_template\n =invocation_template, result_name='results'))\n else:\n output_statement_info = (type_info_data.OutputTypeInfo.\n GetOutputInfo(invocation_template=invocation_template,\n result_name='results'))\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* | {name}{type_desc} */\n template <typename VectorInputT{constructor_template_params}>\n void {name}{suffix}Test(\n std::vector<VectorInputT> const &training_input,\n std::vector<VectorInputT> const &inference_input,\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\n ) {{\n ErrorInfoHandle * pErrorInfo(nullptr);\n\n // Create the estimator\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\n REQUIRE(pEstimatorHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Train\n if(training_input.empty() == false) {{\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\n\n while(true) {{\n 
TrainingState trainingState(0);\n\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(trainingState != Training)\n break;\n\n FitResult result(0);\n auto const & input(*iter);\n\n {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n if(result == ResetAndContinue) {{\n iter = training_input.begin();\n continue;\n }}\n\n ++iter;\n if(iter == training_input.end()) {{\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n iter = training_input.begin();\n }}\n }}\n }}\n\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\n REQUIRE(pErrorInfo == nullptr);\n\n // Once here, training should be complete\n {{\n bool is_complete(false);\n\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n REQUIRE(is_complete);\n }}\n\n // Create the Transformer\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\n\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\n REQUIRE(pTransformerHandle != nullptr);\n REQUIRE(pErrorInfo == nullptr);\n\n // Destroy the estimator\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n \"\"\"\n ).format(name=item.name, type_desc=type_desc, suffix=suffix,\n vector_result_type=output_statement_info.VectorResultType,\n constructor_template_params=constructor_template_params,\n constructor_params=constructor_params, constructor_args=\n constructor_args, fit_input_args=transform_input_args,\n fit_prefix_statements='' if not fit_prefix_statements else\n \"\"\"{}\n\n \"\"\".format(StringHelpers.LeftJustify(\n fit_prefix_statements.rstrip(), 12))))\n inline_destroy_statement = '// No inline destroy statement'\n 
trailing_destroy_statement = '// No trailing destroy statement'\n if output_statement_info.DestroyArgs:\n if output_statement_info.DestroyInline:\n inline_destroy_statement = textwrap.dedent(\n \"\"\"\n // Destroy the contents\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs)\n else:\n trailing_destroy_statement = textwrap.dedent(\n \"\"\" for(auto & {var_name}: results) {{\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix, args=\n output_statement_info.DestroyArgs, var_name=\n output_statement_info.DestroyVarName or 'result')\n if item.has_dynamic_output:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n\n if(true) {{\n {transform_vars}\n\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in 
output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n else:\n f.write(StringHelpers.LeftJustify(textwrap.dedent(\n \"\"\" // Inference\n std::vector<{vector_result_type}> results;\n\n results.reserve(inference_input.size());\n\n {for_loop} {{\n {transform_prefix_statements}{transform_vars}\n\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n\n {transform_statement}\n {inline_destroy_statement}\n }}\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n vector_result_type=output_statement_info.\n VectorResultType, for_loop=for_loop,\n transform_prefix_statements='' if not\n fit_prefix_statements else '{}\\n\\n '.format(\n StringHelpers.LeftJustify(fit_prefix_statements, 4).\n rstrip()), transform_vars=StringHelpers.LeftJustify(\n '\\n'.join(['{} {};'.format(var.Type, var.Name) for var in\n output_statement_info.TransformVars]), 4),\n transform_input_args=transform_input_args,\n transform_output_args=', '.join(['&{}'.format(p.Name) for\n p in output_statement_info.TransformVars]),\n transform_statement=StringHelpers.LeftJustify(\n output_statement_info.AppendResultStatement.rstrip(), 4\n ), inline_destroy_statement=StringHelpers.LeftJustify(\n inline_destroy_statement.rstrip(), 4)), 4,\n skip_first_line=False))\n f.write(textwrap.dedent(\n \"\"\"\n REQUIRE(verify_func(results));\n\n {trailing_destroy_statement}\n\n // Destroy the transformer\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\n REQUIRE(pErrorInfo == nullptr);\n }}\n\n \"\"\"\n ).format(name=item.name, suffix=suffix,\n trailing_destroy_statement=StringHelpers.LeftJustify(\n trailing_destroy_statement.rstrip(), 4)))\n f.write(textwrap.dedent(\n \"\"\" #if 
(defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n\n\ndef _GenerateCommonFiles(open_file_func, output_dir, output_stream):\n with open_file_func(os.path.join(output_dir,\n 'SharedLibraryTests_Common.hpp'), 'w') as f:\n f.write(textwrap.dedent(\n \"\"\" /* ---------------------------------------------------------------------- */\n /* Copyright (c) Microsoft Corporation. All rights reserved. */\n /* Licensed under the MIT License */\n /* ---------------------------------------------------------------------- */\n #pragma once\n\n #include \"SharedLibrary_Common.hpp\"\n\n #if (defined _MSC_VER)\n # pragma warning(push)\n\n // I don't know why MSVC thinks that there is unreachable\n // code in these methods during release builds.\n # pragma warning(disable: 4702) // Unreachable code\n\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\n #endif\n \"\"\"\n ))\n for type_info_class in TypeInfoData.EnumTypeInfoClasses():\n type_info_class.CreateHelperMethods(f)\n f.write(textwrap.dedent(\n \"\"\" #if (defined _MSC_VER)\n # pragma warning(pop)\n #endif\n \"\"\"\n ))\n return 0\n\n\nclass TypeInfoData(object):\n\n def __init__(self, item, global_custom_structs, global_custom_enums):\n custom_enums = OrderedDict()\n for custom_enum in itertools.chain(global_custom_enums, getattr(\n item, 'custom_enums', [])):\n if isinstance(custom_enum.underlying_type, six.string_types):\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\n assert type_info, custom_enum.underlying_type\n custom_enum.underlying_type_info = type_info\n custom_enums[custom_enum.name] = custom_enum\n custom_structs = OrderedDict()\n for custom_struct in itertools.chain(global_custom_structs, getattr\n (item, 'custom_structs', [])):\n members = OrderedDict()\n for member in custom_struct.members:\n type_info = self._CreateTypeInfo(member.type)\n 
assert type_info, member.type\n assert member.name not in members, member.name\n members[member.name] = type_info\n custom_structs[custom_struct.name] = members\n configuration_param_type_infos = []\n for configuration_param in getattr(item, 'configuration_params', []):\n if configuration_param.type in custom_enums:\n type_info = custom_enums[configuration_param.type\n ].underlying_type_info\n configuration_param.is_enum = True\n else:\n type_info = self._CreateTypeInfo(configuration_param.type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert type_info, configuration_param.type\n configuration_param_type_infos.append(type_info)\n input_type_info = self._CreateTypeInfo(item.input_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert input_type_info, item.input_type\n output_type_info = self._CreateTypeInfo(item.output_type,\n custom_structs=custom_structs, custom_enums=custom_enums)\n assert output_type_info, item.output_type\n dynamic_output_info = self._CreateTypeInfo('vector<{}>'.format(item\n .output_type), custom_structs=custom_structs, custom_enums=\n custom_enums)\n self.CustomStructs = custom_structs\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\n self.InputTypeInfo = input_type_info\n self.OutputTypeInfo = output_type_info\n self.DynamicOutputTypeInfo = dynamic_output_info\n\n @classmethod\n def EnumTypeInfoClasses(cls):\n cls._InitTypeInfoClasses()\n yield from cls._type_info_classes\n _type_info_classes = None\n\n @classmethod\n def _InitTypeInfoClasses(cls):\n if cls._type_info_classes is not None:\n return\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\n from 
Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\n type_info_classes = [DatetimeTypeInfo, MatrixTypeInfo,\n SingleValueSparseVectorTypeInfo, SparseVectorTypeInfo,\n StringTypeInfo, TupleTypeInfo, UniqueIdTypeInfo, VectorTypeInfo]\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\n for obj_name in dir(compound_module):\n if obj_name.startswith('_') or not obj_name.endswith('TypeInfo'\n ) or obj_name == 'TypeInfo':\n continue\n type_info_classes.append(getattr(compound_module, obj_name))\n cls._type_info_classes = type_info_classes\n\n @classmethod\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\n cls._InitTypeInfoClasses()\n is_optional = False\n if the_type.endswith('?'):\n the_type = the_type[:-1]\n is_optional = True\n type_info_class = None\n for this_type_info_class in cls._type_info_classes:\n if isinstance(this_type_info_class.TypeName, six.string_types):\n if this_type_info_class.TypeName == the_type:\n type_info_class = this_type_info_class\n break\n elif hasattr(this_type_info_class.TypeName, 'match'):\n if this_type_info_class.TypeName.match(the_type):\n type_info_class = this_type_info_class\n break\n if type_info_class is None:\n return None\n return type_info_class(*args, member_type=the_type, is_optional=\n is_optional, create_type_info_func=cls._CreateTypeInfo, **kwargs)\n",
"step-5": "# ----------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License\r\n# ----------------------------------------------------------------------\r\n\"\"\"Contains the Plugin object\"\"\"\r\n\r\nimport itertools\r\nimport os\r\nimport sys\r\nimport textwrap\r\n\r\nfrom collections import OrderedDict\r\n\r\nimport six\r\n\r\nimport CommonEnvironment\r\nfrom CommonEnvironment.CallOnExit import CallOnExit\r\nfrom CommonEnvironment import StringHelpers\r\nfrom CommonEnvironment import Interface\r\n\r\n# ----------------------------------------------------------------------\r\n_script_fullpath = CommonEnvironment.ThisFullpath()\r\n_script_dir, _script_name = os.path.split(_script_fullpath)\r\n# ----------------------------------------------------------------------\r\n\r\nsys.path.insert(0, os.path.join(_script_dir, \"..\"))\r\nwith CallOnExit(lambda: sys.path.pop(0)):\r\n from Plugin import Plugin as PluginBase, TypeVisitor as TypeVisitorBase\r\n\r\n# ----------------------------------------------------------------------\r\n@Interface.staticderived\r\nclass Plugin(PluginBase):\r\n # ----------------------------------------------------------------------\r\n # | Properties\r\n Name = Interface.DerivedProperty(\"SharedLibraryTests\")\r\n Description = Interface.DerivedProperty(\r\n \"Generates code used when testing the Shared Library import/export layer\",\r\n )\r\n\r\n # ----------------------------------------------------------------------\r\n # | Methods\r\n @staticmethod\r\n @Interface.override\r\n def Generate(\r\n open_file_func,\r\n global_custom_structs,\r\n global_custom_enums,\r\n data,\r\n output_dir,\r\n status_stream,\r\n ):\r\n result_code = 0\r\n\r\n status_stream.write(\"Preprocessing data...\")\r\n with status_stream.DoneManager():\r\n type_info_data = []\r\n\r\n for items in data:\r\n type_info_data.append([TypeInfoData(item, 
global_custom_structs, global_custom_enums) for item in items])\r\n\r\n status_stream.write(\"Generating Common Files...\")\r\n with status_stream.DoneManager() as this_dm:\r\n this_dm.result = _GenerateCommonFiles(open_file_func, output_dir, this_dm.stream)\r\n if this_dm.result != 0:\r\n return this_dm.result\r\n\r\n for desc, func in [(\"Generating .h files...\", _GenerateHeaderFile)]:\r\n status_stream.write(desc)\r\n with status_stream.DoneManager(\r\n suffix=\"\\n\",\r\n ) as dm:\r\n for index, (items, items_type_info_data) in enumerate(\r\n zip(data, type_info_data),\r\n ):\r\n dm.stream.write(\r\n \"Processing '{}' ({} of {})...\".format(\r\n items[0].name,\r\n index + 1,\r\n len(data),\r\n ),\r\n )\r\n with dm.stream.DoneManager() as this_dm:\r\n this_dm.result = func(\r\n open_file_func,\r\n output_dir,\r\n items,\r\n items_type_info_data,\r\n this_dm.stream,\r\n )\r\n\r\n if dm.result < 0:\r\n return dm.result\r\n\r\n result_code = result_code or dm.result\r\n\r\n return result_code\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\ndef _GenerateHeaderFile(open_file_func, output_dir, items, all_type_info_data, output_stream):\r\n with open_file_func(\r\n os.path.join(output_dir, \"SharedLibraryTests_{}.h\".format(items[0].name)),\r\n \"w\",\r\n ) as f:\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\r\n /* Licensed under the MIT License */\r\n /* ---------------------------------------------------------------------- */\r\n #pragma once\r\n\r\n #include \"SharedLibrary_{name}.h\"\r\n\r\n #include \"Traits.h\"\r\n #include \"Featurizers/Structs.h\"\r\n\r\n #include \"SharedLibraryTests_Common.hpp\"\r\n\r\n #if (defined _MSC_VER)\r\n # pragma warning(push)\r\n\r\n // I don't know why MSVC thinks that there is unreachable\r\n // code in these methods during release builds.\r\n # pragma warning(disable: 4702) // Unreachable code\r\n\r\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\r\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\r\n #endif\r\n\r\n \"\"\",\r\n ).format(\r\n name=items[0].name,\r\n ),\r\n )\r\n\r\n for item, type_info_data in zip(items, all_type_info_data):\r\n template = getattr(item, \"template\", None)\r\n if template:\r\n suffix = \"_{}_\".format(template)\r\n type_desc = \" <{}>\".format(template)\r\n cpp_template_suffix = \"<{}>\".format(\r\n type_info_data.InputTypeInfo.CppType,\r\n )\r\n else:\r\n suffix = \"_\"\r\n type_desc = \"\"\r\n cpp_template_suffix = \"\"\r\n\r\n if type_info_data.ConfigurationParamTypeInfos:\r\n constructor_template_params = \", typename... ConstructorArgTs\"\r\n constructor_params = \",\\n ConstructorArgTs &&... 
constructor_args\"\r\n constructor_args = \"std::forward<ConstructorArgTs>(constructor_args)..., \"\r\n else:\r\n constructor_template_params = \"\"\r\n constructor_params = \"\"\r\n constructor_args = \"\"\r\n\r\n fit_prefix_statements = \"\"\r\n\r\n transform_input_args = type_info_data.InputTypeInfo.GetTransformInputArgs()\r\n if isinstance(transform_input_args, tuple):\r\n transform_input_args, fit_prefix_statements = transform_input_args\r\n\r\n # Special processing for vector<bool>\r\n if type_info_data.InputTypeInfo.TypeName == \"bool\":\r\n # vector<bool> isn't actually a bool, so we can't take a direct reference to it\r\n for_loop = \"for(bool input : inference_input)\"\r\n else:\r\n for_loop = \"for(auto const & input : inference_input)\"\r\n\r\n if type_info_data.OutputTypeInfo.TypeName == \"bool\":\r\n # vector<bool> doesn't support emplace_back on some platforms\r\n invocation_template = \"results.push_back({});\"\r\n else:\r\n invocation_template = \"results.emplace_back({});\"\r\n\r\n # Get the output statement information\r\n if item.has_dynamic_output:\r\n output_statement_info = type_info_data.DynamicOutputTypeInfo.GetOutputInfo(\r\n invocation_template=invocation_template,\r\n result_name=\"results\",\r\n )\r\n else:\r\n output_statement_info = type_info_data.OutputTypeInfo.GetOutputInfo(\r\n invocation_template=invocation_template,\r\n result_name=\"results\",\r\n )\r\n\r\n # Write the training statements\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* | {name}{type_desc} */\r\n template <typename VectorInputT{constructor_template_params}>\r\n void {name}{suffix}Test(\r\n std::vector<VectorInputT> const &training_input,\r\n std::vector<VectorInputT> const &inference_input,\r\n std::function<bool (std::vector<{vector_result_type}> const &)> const &verify_func{constructor_params}\r\n ) {{\r\n ErrorInfoHandle * pErrorInfo(nullptr);\r\n\r\n // Create the 
estimator\r\n {name}{suffix}EstimatorHandle *pEstimatorHandle(nullptr);\r\n\r\n REQUIRE({name}{suffix}CreateEstimator({constructor_args}&pEstimatorHandle, &pErrorInfo));\r\n REQUIRE(pEstimatorHandle != nullptr);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Train\r\n if(training_input.empty() == false) {{\r\n typename std::vector<VectorInputT>::const_iterator iter(training_input.begin());\r\n\r\n while(true) {{\r\n TrainingState trainingState(0);\r\n\r\n REQUIRE({name}{suffix}GetState(pEstimatorHandle, &trainingState, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n if(trainingState != Training)\r\n break;\r\n\r\n FitResult result(0);\r\n auto const & input(*iter);\r\n\r\n {fit_prefix_statements}REQUIRE({name}{suffix}Fit(pEstimatorHandle, {fit_input_args}, &result, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n if(result == ResetAndContinue) {{\r\n iter = training_input.begin();\r\n continue;\r\n }}\r\n\r\n ++iter;\r\n if(iter == training_input.end()) {{\r\n REQUIRE({name}{suffix}OnDataCompleted(pEstimatorHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n iter = training_input.begin();\r\n }}\r\n }}\r\n }}\r\n\r\n {name}{suffix}CompleteTraining(pEstimatorHandle, &pErrorInfo);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Once here, training should be complete\r\n {{\r\n bool is_complete(false);\r\n\r\n REQUIRE({name}{suffix}IsTrainingComplete(pEstimatorHandle, &is_complete, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n REQUIRE(is_complete);\r\n }}\r\n\r\n // Create the Transformer\r\n {name}{suffix}TransformerHandle * pTransformerHandle(nullptr);\r\n\r\n REQUIRE({name}{suffix}CreateTransformerFromEstimator(pEstimatorHandle, &pTransformerHandle, &pErrorInfo));\r\n REQUIRE(pTransformerHandle != nullptr);\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n // Destroy the estimator\r\n REQUIRE({name}{suffix}DestroyEstimator(pEstimatorHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n \"\"\",\r\n ).format(\r\n 
name=item.name,\r\n type_desc=type_desc,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n constructor_template_params=constructor_template_params,\r\n constructor_params=constructor_params,\r\n constructor_args=constructor_args,\r\n fit_input_args=transform_input_args,\r\n fit_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements.rstrip(),\r\n 12,\r\n ),\r\n ),\r\n ),\r\n )\r\n\r\n # Write the inferencing statements\r\n inline_destroy_statement = \"// No inline destroy statement\"\r\n trailing_destroy_statement = \"// No trailing destroy statement\"\r\n\r\n if output_statement_info.DestroyArgs:\r\n if output_statement_info.DestroyInline:\r\n inline_destroy_statement = textwrap.dedent(\r\n \"\"\"\\\r\n\r\n // Destroy the contents\r\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n args=output_statement_info.DestroyArgs,\r\n )\r\n else:\r\n trailing_destroy_statement = textwrap.dedent(\r\n \"\"\"\\\r\n for(auto & {var_name}: results) {{\r\n REQUIRE({name}{suffix}DestroyTransformedData({args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n args=output_statement_info.DestroyArgs,\r\n var_name=output_statement_info.DestroyVarName or \"result\",\r\n )\r\n\r\n if item.has_dynamic_output:\r\n f.write(\r\n StringHelpers.LeftJustify(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n // Inference\r\n std::vector<{vector_result_type}> results;\r\n\r\n {for_loop} {{\r\n {transform_prefix_statements}{transform_vars}\r\n\r\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n\r\n if(true) {{\r\n 
{transform_vars}\r\n\r\n REQUIRE({name}{suffix}Flush(pTransformerHandle, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n for_loop=for_loop,\r\n transform_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements,\r\n 4,\r\n ).rstrip(),\r\n ),\r\n transform_vars=StringHelpers.LeftJustify(\r\n \"\\n\".join(\r\n [\r\n \"{} {};\".format(var.Type, var.Name)\r\n for var in output_statement_info.TransformVars\r\n ]\r\n ),\r\n 4,\r\n ),\r\n transform_input_args=transform_input_args,\r\n transform_output_args=\", \".join([\"&{}\".format(p.Name) for p in output_statement_info.TransformVars]),\r\n transform_statement=StringHelpers.LeftJustify(\r\n output_statement_info.AppendResultStatement.rstrip(),\r\n 4,\r\n ),\r\n inline_destroy_statement=StringHelpers.LeftJustify(\r\n inline_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n 4,\r\n skip_first_line=False,\r\n ),\r\n )\r\n else:\r\n f.write(\r\n StringHelpers.LeftJustify(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n // Inference\r\n std::vector<{vector_result_type}> results;\r\n\r\n results.reserve(inference_input.size());\r\n\r\n {for_loop} {{\r\n {transform_prefix_statements}{transform_vars}\r\n\r\n REQUIRE({name}{suffix}Transform(pTransformerHandle, {transform_input_args}, {transform_output_args}, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n\r\n {transform_statement}\r\n {inline_destroy_statement}\r\n }}\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n vector_result_type=output_statement_info.VectorResultType,\r\n for_loop=for_loop,\r\n transform_prefix_statements=\"\" if not fit_prefix_statements else \"{}\\n\\n \".format(\r\n StringHelpers.LeftJustify(\r\n fit_prefix_statements,\r\n 4,\r\n 
).rstrip(),\r\n ),\r\n transform_vars=StringHelpers.LeftJustify(\r\n \"\\n\".join(\r\n [\r\n \"{} {};\".format(var.Type, var.Name)\r\n for var in output_statement_info.TransformVars\r\n ]\r\n ),\r\n 4,\r\n ),\r\n transform_input_args=transform_input_args,\r\n transform_output_args=\", \".join([\"&{}\".format(p.Name) for p in output_statement_info.TransformVars]),\r\n transform_statement=StringHelpers.LeftJustify(\r\n output_statement_info.AppendResultStatement.rstrip(),\r\n 4,\r\n ),\r\n inline_destroy_statement=StringHelpers.LeftJustify(\r\n inline_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n 4,\r\n skip_first_line=False,\r\n ),\r\n )\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n\r\n REQUIRE(verify_func(results));\r\n\r\n {trailing_destroy_statement}\r\n\r\n // Destroy the transformer\r\n REQUIRE({name}{suffix}DestroyTransformer(pTransformerHandle, &pErrorInfo));\r\n REQUIRE(pErrorInfo == nullptr);\r\n }}\r\n\r\n \"\"\",\r\n ).format(\r\n name=item.name,\r\n suffix=suffix,\r\n trailing_destroy_statement=StringHelpers.LeftJustify(\r\n trailing_destroy_statement.rstrip(),\r\n 4,\r\n ),\r\n ),\r\n )\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n #if (defined _MSC_VER)\r\n # pragma warning(pop)\r\n #endif\r\n \"\"\",\r\n ),\r\n )\r\n\r\n\r\n# ----------------------------------------------------------------------\r\ndef _GenerateCommonFiles(open_file_func, output_dir, output_stream):\r\n with open_file_func(\r\n os.path.join(output_dir, \"SharedLibraryTests_Common.hpp\"),\r\n \"w\",\r\n ) as f:\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n /* ---------------------------------------------------------------------- */\r\n /* Copyright (c) Microsoft Corporation. All rights reserved. 
*/\r\n /* Licensed under the MIT License */\r\n /* ---------------------------------------------------------------------- */\r\n #pragma once\r\n\r\n #include \"SharedLibrary_Common.hpp\"\r\n\r\n #if (defined _MSC_VER)\r\n # pragma warning(push)\r\n\r\n // I don't know why MSVC thinks that there is unreachable\r\n // code in these methods during release builds.\r\n # pragma warning(disable: 4702) // Unreachable code\r\n\r\n # pragma warning(disable: 4701) // potentially uninitialized local variable '<name>' used\r\n # pragma warning(disable: 4703) // potentially uninitialized local pointer variable '<name>' used\r\n #endif\r\n \"\"\",\r\n ),\r\n )\r\n\r\n for type_info_class in TypeInfoData.EnumTypeInfoClasses():\r\n type_info_class.CreateHelperMethods(f)\r\n\r\n f.write(\r\n textwrap.dedent(\r\n \"\"\"\\\r\n #if (defined _MSC_VER)\r\n # pragma warning(pop)\r\n #endif\r\n \"\"\",\r\n ),\r\n )\r\n\r\n return 0\r\n\r\n\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\n# ----------------------------------------------------------------------\r\nclass TypeInfoData(object):\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Public Methods\r\n # |\r\n # ----------------------------------------------------------------------\r\n def __init__(self, item, global_custom_structs, global_custom_enums):\r\n # Create the custom enums\r\n custom_enums = OrderedDict()\r\n\r\n for custom_enum in itertools.chain(global_custom_enums, getattr(item, \"custom_enums\", [])):\r\n if isinstance(custom_enum.underlying_type, six.string_types):\r\n type_info = self._CreateTypeInfo(custom_enum.underlying_type)\r\n assert type_info, custom_enum.underlying_type\r\n\r\n custom_enum.underlying_type_info = type_info\r\n\r\n custom_enums[custom_enum.name] = custom_enum\r\n\r\n # Create the custom structs\r\n custom_structs = OrderedDict()\r\n\r\n for 
custom_struct in itertools.chain(global_custom_structs, getattr(item, \"custom_structs\", [])):\r\n members = OrderedDict()\r\n\r\n for member in custom_struct.members:\r\n type_info = self._CreateTypeInfo(member.type)\r\n assert type_info, member.type\r\n\r\n assert member.name not in members, member.name\r\n members[member.name] = type_info\r\n\r\n custom_structs[custom_struct.name] = members\r\n\r\n # Create the configuration param type infos\r\n configuration_param_type_infos = []\r\n\r\n for configuration_param in getattr(item, \"configuration_params\", []):\r\n if configuration_param.type in custom_enums:\r\n type_info = custom_enums[configuration_param.type].underlying_type_info\r\n configuration_param.is_enum = True\r\n\r\n else:\r\n type_info = self._CreateTypeInfo(\r\n configuration_param.type,\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert type_info, configuration_param.type\r\n\r\n configuration_param_type_infos.append(type_info)\r\n\r\n input_type_info = self._CreateTypeInfo(\r\n item.input_type,\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert input_type_info, item.input_type\r\n\r\n output_type_info = self._CreateTypeInfo(\r\n item.output_type,\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n assert output_type_info, item.output_type\r\n\r\n dynamic_output_info = self._CreateTypeInfo(\r\n \"vector<{}>\".format(item.output_type),\r\n custom_structs=custom_structs,\r\n custom_enums=custom_enums,\r\n )\r\n\r\n # Commit the results\r\n self.CustomStructs = custom_structs\r\n self.ConfigurationParamTypeInfos = configuration_param_type_infos\r\n self.InputTypeInfo = input_type_info\r\n self.OutputTypeInfo = output_type_info\r\n self.DynamicOutputTypeInfo = dynamic_output_info\r\n\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def EnumTypeInfoClasses(cls):\r\n cls._InitTypeInfoClasses()\r\n yield from 
cls._type_info_classes\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Private Data\r\n # |\r\n # ----------------------------------------------------------------------\r\n _type_info_classes = None\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Private Methods\r\n # |\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def _InitTypeInfoClasses(cls):\r\n if cls._type_info_classes is not None:\r\n return\r\n\r\n from Plugins.SharedLibraryTestsPluginImpl.DatetimeTypeInfo import DatetimeTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.MatrixTypeInfo import MatrixTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl import ScalarTypeInfos\r\n from Plugins.SharedLibraryTestsPluginImpl.SingleValueSparseVectorTypeInfo import SingleValueSparseVectorTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.SparseVectorTypeInfo import SparseVectorTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.StringTypeInfo import StringTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl import StructTypeInfos\r\n from Plugins.SharedLibraryTestsPluginImpl.TupleTypeInfo import TupleTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.UniqueIdTypeInfo import UniqueIdTypeInfo\r\n from Plugins.SharedLibraryTestsPluginImpl.VectorTypeInfo import VectorTypeInfo\r\n\r\n type_info_classes = [\r\n DatetimeTypeInfo,\r\n MatrixTypeInfo,\r\n SingleValueSparseVectorTypeInfo,\r\n SparseVectorTypeInfo,\r\n StringTypeInfo,\r\n TupleTypeInfo,\r\n UniqueIdTypeInfo,\r\n VectorTypeInfo,\r\n ]\r\n\r\n for compound_module in [ScalarTypeInfos, StructTypeInfos]:\r\n for obj_name in dir(compound_module):\r\n if (\r\n obj_name.startswith(\"_\")\r\n or not obj_name.endswith(\"TypeInfo\")\r\n or obj_name == \"TypeInfo\"\r\n ):\r\n continue\r\n\r\n type_info_classes.append(getattr(compound_module, obj_name))\r\n\r\n # Associate the type infos with the class 
rather than the instance\r\n # so that we only need to perform this initialization once.\r\n cls._type_info_classes = type_info_classes\r\n\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n def _CreateTypeInfo(cls, the_type, *args, **kwargs):\r\n cls._InitTypeInfoClasses()\r\n\r\n is_optional = False\r\n\r\n if the_type.endswith(\"?\"):\r\n the_type = the_type[:-1]\r\n is_optional = True\r\n\r\n type_info_class = None\r\n\r\n for this_type_info_class in cls._type_info_classes:\r\n if isinstance(this_type_info_class.TypeName, six.string_types):\r\n if this_type_info_class.TypeName == the_type:\r\n type_info_class = this_type_info_class\r\n break\r\n\r\n elif hasattr(this_type_info_class.TypeName, \"match\"):\r\n if this_type_info_class.TypeName.match(the_type):\r\n type_info_class = this_type_info_class\r\n break\r\n\r\n if type_info_class is None:\r\n return None\r\n\r\n return type_info_class(\r\n *args,\r\n member_type=the_type,\r\n is_optional=is_optional,\r\n create_type_info_func=cls._CreateTypeInfo,\r\n **kwargs\r\n )\r\n",
"step-ids": [
8,
9,
10,
11,
15
]
}
|
[
8,
9,
10,
11,
15
] |
import logging
from unittest.mock import patch, Mock
from intake.tests.base_testcases import ExternalNotificationsPatchTestCase
from intake.tests import mock, factories
from intake.tests.mock_org_answers import get_answers_for_orgs
from intake.management.commands import send_followups
from user_accounts.models import Organization
from project.tests.assertions import assertInLogsCount
class TestCommand(ExternalNotificationsPatchTestCase):
    """Tests for the ``send_followups`` management command."""

    fixtures = ['counties', 'organizations']

    @patch('intake.management.commands.send_followups.is_the_weekend')
    @patch('intake.management.commands.send_followups.FollowupsService')
    def test_doesnt_do_anything_on_the_weekend(
            self, FollowupsService, is_the_weekend):
        # On a weekend the command should bail out before ever touching
        # the followups service.
        is_the_weekend.return_value = True
        cmd = send_followups.Command()
        cmd.stdout = Mock()
        cmd.handle()
        FollowupsService.assert_not_called()

    @patch('intake.management.commands.send_followups.is_the_weekend')
    def test_expected_weekday_run(self, is_the_weekend):
        # On a weekday, each eligible old submission should produce one
        # followup email and one matching log event.
        is_the_weekend.return_value = False
        org = Organization.objects.get(slug='ebclc')
        dates = sorted(mock.get_old_date() for _ in range(464, 469))
        for pk, received_on in zip(range(464, 469), dates):
            answers = get_answers_for_orgs(
                [org],
                contact_preferences=['prefers_email', 'prefers_sms'],
                phone='4445551111',
                email='test@test.com',
            )
            factories.FormSubmissionWithOrgsFactory.create(
                id=pk,
                date_received=received_on,
                organizations=[org],
                answers=answers,
            )
        cmd = send_followups.Command()
        cmd.stdout = Mock()
        logger_name = 'project.services.logging_service'
        with self.assertLogs(logger_name, logging.INFO) as logs:
            cmd.handle()
        email_calls = self.notifications.email_followup.send.mock_calls
        self.assertEqual(len(email_calls), 4)
        assertInLogsCount(logs, {'event_name=app_followup_sent': 4})
|
normal
|
{
"blob_id": "5cb67e5fcedafca4ce124e4094cbd8e1e9d95bb4",
"index": 3740,
"step-1": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n <mask token>\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n <mask token>\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-3": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n fixtures = ['counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-4": "import logging\nfrom unittest.mock import patch, Mock\nfrom intake.tests.base_testcases import ExternalNotificationsPatchTestCase\nfrom intake.tests import mock, factories\nfrom intake.tests.mock_org_answers import get_answers_for_orgs\nfrom intake.management.commands import send_followups\nfrom user_accounts.models import Organization\nfrom project.tests.assertions import assertInLogsCount\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n fixtures = ['counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-5": "import logging\nfrom unittest.mock import patch, Mock\nfrom intake.tests.base_testcases import ExternalNotificationsPatchTestCase\nfrom intake.tests import mock, factories\nfrom intake.tests.mock_org_answers import get_answers_for_orgs\nfrom intake.management.commands import send_followups\nfrom user_accounts.models import Organization\nfrom project.tests.assertions import assertInLogsCount\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n\n fixtures = [\n 'counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(\n self, FollowupsService, is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(\n id=pk,\n date_received=date,\n organizations=[org],\n answers=get_answers_for_orgs(\n [org],\n contact_preferences=[\n 'prefers_email',\n 'prefers_sms'],\n phone='4445551111',\n email='test@test.com',\n ))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs(\n 'project.services.logging_service', logging.INFO) as logs:\n command.handle()\n self.assertEqual(\n len(self.notifications.email_followup.send.mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 12:28:39 2020
@author: Ксения
"""
import serial
import time
import serial.tools.list_ports as lp
def get_comports_list():
    """Print the device name of each detected COM port and return them all."""
    available = list(lp.comports(include_links=False))
    for port_info in available:
        print(port_info.device)
    return available
def read_while_LF(com, timeout_ms=500):
    """Read bytes from *com* one at a time until a line feed arrives.

    Polls the port byte-by-byte with a short pause between reads and
    gives up after roughly ``timeout_ms`` milliseconds worth of
    attempts (the port's own read timeout adds on top of this budget).

    Args:
        com: serial-port-like object whose ``read(size=1)`` returns
            ``bytes`` (possibly empty when its own timeout expires).
        timeout_ms: approximate total polling budget, in milliseconds.

    Returns:
        str: the UTF-8 decoded text accumulated so far, including the
        trailing line feed when one arrived within the budget; may be
        a partial line (or empty) when the budget ran out first.
    """
    read_data = ""
    delay_ms = 10  # pause between single-byte polls
    attempts = int(timeout_ms / delay_ms)
    for _ in range(attempts):
        byte = com.read(size=1).decode('utf-8')
        # Derive the pause from delay_ms so it stays consistent with the
        # attempt-count computation above (previously a hard-coded 0.01,
        # which is the same 10 ms but silently duplicated the constant).
        time.sleep(delay_ms / 1000)
        read_data += byte
        if byte == '\n':
            break
    return read_data
def read_write_gst(com, instruction):
    """Send *instruction* to the device and collect its response lines.

    The instruction is UTF-8 encoded and written to the port, then
    lines are read back via ``read_while_LF`` until an empty read
    signals that no more data is coming.
    """
    com.write(instruction.encode('utf-8'))
    responses = []
    line = read_while_LF(com)
    while line != "":
        responses.append(line)
        line = read_while_LF(com)
    return responses
# Open the serial port (adjust 'COM3' for the target device). The short
# read timeout keeps the byte-by-byte polling in read_while_LF snappy.
com = serial.Serial('COM3', baudrate=115200, timeout=0.02)
# Kick off the device's filter test and collect its response lines.
s=read_write_gst(com, "fil_test:start\r")
print(s)
com.close()
|
normal
|
{
"blob_id": "e08fddefabf1b92aa97b939e05bb31d888df4e6a",
"index": 2241,
"step-1": "<mask token>\n\n\ndef get_comports_list():\n ports = list(lp.comports(include_links=False))\n for p in ports:\n print(p.device)\n return ports\n\n\ndef read_while_LF(com, timeout_ms=500):\n read_data = ''\n delay_ms = 10\n attempts = int(timeout_ms / delay_ms)\n for i in range(attempts):\n byte = com.read(size=1).decode('utf-8')\n time.sleep(0.01)\n read_data += byte\n if byte == '\\n':\n break\n return read_data\n\n\ndef read_write_gst(com, instruction):\n write_data = instruction.encode('utf-8')\n com.write(write_data)\n recieved = []\n while 1:\n read_data = read_while_LF(com)\n if read_data == '':\n break\n recieved.append(read_data)\n return recieved\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_comports_list():\n ports = list(lp.comports(include_links=False))\n for p in ports:\n print(p.device)\n return ports\n\n\ndef read_while_LF(com, timeout_ms=500):\n read_data = ''\n delay_ms = 10\n attempts = int(timeout_ms / delay_ms)\n for i in range(attempts):\n byte = com.read(size=1).decode('utf-8')\n time.sleep(0.01)\n read_data += byte\n if byte == '\\n':\n break\n return read_data\n\n\ndef read_write_gst(com, instruction):\n write_data = instruction.encode('utf-8')\n com.write(write_data)\n recieved = []\n while 1:\n read_data = read_while_LF(com)\n if read_data == '':\n break\n recieved.append(read_data)\n return recieved\n\n\n<mask token>\nprint(s)\ncom.close()\n",
"step-3": "<mask token>\n\n\ndef get_comports_list():\n ports = list(lp.comports(include_links=False))\n for p in ports:\n print(p.device)\n return ports\n\n\ndef read_while_LF(com, timeout_ms=500):\n read_data = ''\n delay_ms = 10\n attempts = int(timeout_ms / delay_ms)\n for i in range(attempts):\n byte = com.read(size=1).decode('utf-8')\n time.sleep(0.01)\n read_data += byte\n if byte == '\\n':\n break\n return read_data\n\n\ndef read_write_gst(com, instruction):\n write_data = instruction.encode('utf-8')\n com.write(write_data)\n recieved = []\n while 1:\n read_data = read_while_LF(com)\n if read_data == '':\n break\n recieved.append(read_data)\n return recieved\n\n\ncom = serial.Serial('COM3', baudrate=115200, timeout=0.02)\ns = read_write_gst(com, 'fil_test:start\\r')\nprint(s)\ncom.close()\n",
"step-4": "<mask token>\nimport serial\nimport time\nimport serial.tools.list_ports as lp\n\n\ndef get_comports_list():\n ports = list(lp.comports(include_links=False))\n for p in ports:\n print(p.device)\n return ports\n\n\ndef read_while_LF(com, timeout_ms=500):\n read_data = ''\n delay_ms = 10\n attempts = int(timeout_ms / delay_ms)\n for i in range(attempts):\n byte = com.read(size=1).decode('utf-8')\n time.sleep(0.01)\n read_data += byte\n if byte == '\\n':\n break\n return read_data\n\n\ndef read_write_gst(com, instruction):\n write_data = instruction.encode('utf-8')\n com.write(write_data)\n recieved = []\n while 1:\n read_data = read_while_LF(com)\n if read_data == '':\n break\n recieved.append(read_data)\n return recieved\n\n\ncom = serial.Serial('COM3', baudrate=115200, timeout=0.02)\ns = read_write_gst(com, 'fil_test:start\\r')\nprint(s)\ncom.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 19 12:28:39 2020\n\n@author: Ксения\n\"\"\"\n\n\nimport serial\nimport time\nimport serial.tools.list_ports as lp\n\n\ndef get_comports_list():\n ports=list(lp.comports(include_links=False))\n for p in ports:\n print(p.device)\n return ports\n\n\n\ndef read_while_LF(com, timeout_ms=500):\n read_data =\"\"\n delay_ms=10\n attempts=int(timeout_ms/delay_ms)\n for i in range(attempts): \n byte=com.read(size = 1).decode('utf-8')\n time.sleep(0.01)\n read_data+=byte\n if byte == '\\n':\n break\n\n return read_data\n\ndef read_write_gst(com, instruction): \n\n write_data=instruction.encode('utf-8')\n com.write(write_data)\n recieved = []\n while(1):\n read_data=read_while_LF(com)\n if(read_data == \"\"):\n break\n recieved.append(read_data)\n\n return recieved\n\n\ncom = serial.Serial('COM3', baudrate=115200, timeout=0.02)\n\n\n\n\n\ns=read_write_gst(com, \"fil_test:start\\r\")\n\n\nprint(s)\ncom.close()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
@author Lucas
@date 2019/3/29 21:46
"""
# Binary search
def search(nums, target):
    """Binary-search *nums* (sorted ascending) for *target*.

    Args:
        nums: list of comparable values in ascending order.
        target: value to locate.

    Returns:
        The index of ``target`` in ``nums``, or -1 when absent.
    """
    left = 0
    right = len(nums) - 1

    while left <= right:
        # Floor division keeps the midpoint exact for arbitrarily large
        # indices; int((left + right) / 2) goes through a float and can
        # lose precision once the sum exceeds 2**53.
        mid = (left + right) // 2
        if target > nums[mid]:
            left = mid + 1
        elif target < nums[mid]:
            right = mid - 1
        else:
            return mid
    return -1
if __name__ == '__main__':
    # Quick manual check: 12 is the last element, so expect index 5.
    print(search([-1, 0, 3, 5, 9, 12], 12))
|
normal
|
{
"blob_id": "3eeed39bf775e2ac1900142b348f20d15907c6e6",
"index": 4972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef search(nums, target):\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = int((left + right) / 2)\n if target > nums[mid]:\n left = mid + 1\n elif target < nums[mid]:\n right = mid - 1\n else:\n return mid\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef search(nums, target):\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = int((left + right) / 2)\n if target > nums[mid]:\n left = mid + 1\n elif target < nums[mid]:\n right = mid - 1\n else:\n return mid\n return -1\n\n\nif __name__ == '__main__':\n print(search([-1, 0, 3, 5, 9, 12], 12))\n",
"step-4": "\"\"\"\n@author Lucas\n@date 2019/3/29 21:46\n\"\"\"\n\n\n# 二分查找\ndef search(nums, target):\n left = 0\n right = len(nums) - 1\n\n while left <= right:\n mid = int((left + right)/2)\n if target > nums[mid]:\n left = mid + 1\n elif target < nums[mid]:\n right = mid - 1\n else:\n return mid\n return -1\n\n\nif __name__ == '__main__':\n print(search([-1, 0, 3, 5, 9, 12], 12))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/ui_LibraryTab.ui'
#
# Created: Tue Jun 9 21:46:41 2015
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tab(object):
    """Auto-generated (pyuic5) view class for the library tab.

    NOTE: this file is generated from ui_LibraryTab.ui — hand edits are
    lost on regeneration, so only the .ui file should be modified.
    """

    def setupUi(self, Tab):
        """Build the tab's widget hierarchy and layouts on *Tab*.

        Creates a vertical layout holding a top button row (the
        "Enter Password" button pushed left by an expanding spacer)
        above a resizable scroll area whose contents use a grid layout.
        """
        Tab.setObjectName("Tab")
        Tab.resize(762, 523)
        self.verticalLayout = QtWidgets.QVBoxLayout(Tab)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: password button, then an expanding spacer so the
        # button hugs the left edge.
        self.hLayout = QtWidgets.QHBoxLayout()
        self.hLayout.setObjectName("hLayout")
        self.btn_enterPassword = QtWidgets.QPushButton(Tab)
        self.btn_enterPassword.setObjectName("btn_enterPassword")
        self.hLayout.addWidget(self.btn_enterPassword)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.hLayout.addItem(spacerItem)
        self.verticalLayout.addLayout(self.hLayout)
        # Scrollable body: a widget with a grid layout, presumably
        # populated with library entries at runtime — confirm in caller.
        self.scrollArea = QtWidgets.QScrollArea(Tab)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName("scrollArea")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
        self.gridLayout.setObjectName("gridLayout")
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout.addWidget(self.scrollArea)

        self.retranslateUi(Tab)
        QtCore.QMetaObject.connectSlotsByName(Tab)

    def retranslateUi(self, Tab):
        """Apply translatable UI strings to the widgets on *Tab*."""
        _translate = QtCore.QCoreApplication.translate
        Tab.setWindowTitle(_translate("Tab", "Form"))
        self.btn_enterPassword.setText(_translate("Tab", "Enter Password"))
|
normal
|
{
"blob_id": "ef85f94282bfd7c9491c4e28bab61aaab5c792a5",
"index": 232,
"step-1": "<mask token>\n\n\nclass Ui_Tab(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Tab(object):\n <mask token>\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-3": "<mask token>\n\n\nclass Ui_Tab(object):\n\n def setupUi(self, Tab):\n Tab.setObjectName('Tab')\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName('verticalLayout')\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName('hLayout')\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName('btn_enterPassword')\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.\n Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName('scrollArea')\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName('gridLayout')\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Tab(object):\n\n def setupUi(self, Tab):\n Tab.setObjectName('Tab')\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName('verticalLayout')\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName('hLayout')\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName('btn_enterPassword')\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.\n Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName('scrollArea')\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName('gridLayout')\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'src/ui_LibraryTab.ui'\n#\n# Created: Tue Jun 9 21:46:41 2015\n# by: PyQt5 UI code generator 5.4\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Tab(object):\n def setupUi(self, Tab):\n Tab.setObjectName(\"Tab\")\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName(\"hLayout\")\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName(\"btn_enterPassword\")\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName(\"scrollArea\")\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate(\"Tab\", \"Form\"))\n self.btn_enterPassword.setText(_translate(\"Tab\", \"Enter Password\"))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Test_is_palindrome(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_is_palindrome(TestCase):
def test_is_palindrome(self):
from identify_a_palindrome import is_palindrome
self.assertTrue(is_palindrome('Asdfdsa'))
self.assertTrue(is_palindrome("asDf'ssfdsa"))
def test_is_palindrome_with_non_alpha(self):
from identify_a_palindrome import is_palindrome
self.assertTrue(is_palindrome("asdf'ssfdsa"))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test_is_palindrome(TestCase):
def test_is_palindrome(self):
from identify_a_palindrome import is_palindrome
self.assertTrue(is_palindrome('Asdfdsa'))
self.assertTrue(is_palindrome("asDf'ssfdsa"))
def test_is_palindrome_with_non_alpha(self):
from identify_a_palindrome import is_palindrome
self.assertTrue(is_palindrome("asdf'ssfdsa"))
def test_is_not_palindrome(self):
from identify_a_palindrome import is_palindrome
self.assertFalse(is_palindrome('asdfddsa'))
self.assertFalse(is_palindrome('hello world'))
<|reserved_special_token_1|>
from unittest import TestCase
class Test_is_palindrome(TestCase):
    """Behavioral checks for identify_a_palindrome.is_palindrome.

    The function under test is imported lazily inside each test method, so a
    missing module fails the individual tests rather than collection.
    """

    def test_is_palindrome(self):
        """Mixed-case palindromes (with punctuation) are accepted."""
        from identify_a_palindrome import is_palindrome
        for phrase in ('Asdfdsa', "asDf'ssfdsa"):
            self.assertTrue(is_palindrome(phrase))

    def test_is_palindrome_with_non_alpha(self):
        """A non-alphanumeric character (apostrophe) does not break detection."""
        from identify_a_palindrome import is_palindrome
        verdict = is_palindrome("asdf'ssfdsa")
        self.assertTrue(verdict)

    def test_is_not_palindrome(self):
        """Strings that do not read the same backwards are rejected."""
        from identify_a_palindrome import is_palindrome
        for phrase in ('asdfddsa', 'hello world'):
            self.assertFalse(is_palindrome(phrase))
<|reserved_special_token_1|>
from unittest import TestCase
# auto-test toggled test class to monitor changes to is_palindrome function
class Test_is_palindrome(TestCase):
    """Tests pinning the expected contract of identify_a_palindrome.is_palindrome
    (imported lazily per test so collection succeeds without the module)."""
    def test_is_palindrome(self):
        """Palindromes are detected regardless of letter case, per these expectations."""
        from identify_a_palindrome import is_palindrome
        self.assertTrue(is_palindrome("Asdfdsa"))  # mixed case
        self.assertTrue(is_palindrome("asDf'ssfdsa"))  # mixed case + apostrophe
    def test_is_palindrome_with_non_alpha(self):
        """A non-alphanumeric character (apostrophe) is expected to be ignored."""
        from identify_a_palindrome import is_palindrome
        self.assertTrue(is_palindrome("asdf'ssfdsa"))
    def test_is_not_palindrome(self):
        """Non-palindromic strings must be rejected."""
        from identify_a_palindrome import is_palindrome
        self.assertFalse(is_palindrome("asdfddsa"))
        self.assertFalse(is_palindrome("hello world"))
|
flexible
|
{
"blob_id": "785b54dce76d6906df513a8bde0110ab6fd63357",
"index": 7083,
"step-1": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome('asdfddsa'))\n self.assertFalse(is_palindrome('hello world'))\n",
"step-4": "from unittest import TestCase\n\n\nclass Test_is_palindrome(TestCase):\n\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome('Asdfdsa'))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome('asdfddsa'))\n self.assertFalse(is_palindrome('hello world'))\n",
"step-5": "from unittest import TestCase\n\n# auto-test toggled test class to monitor changes to is_palindrome function\nclass Test_is_palindrome(TestCase):\n def test_is_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"Asdfdsa\"))\n self.assertTrue(is_palindrome(\"asDf'ssfdsa\"))\n\n def test_is_palindrome_with_non_alpha(self):\n from identify_a_palindrome import is_palindrome\n self.assertTrue(is_palindrome(\"asdf'ssfdsa\"))\n\n def test_is_not_palindrome(self):\n from identify_a_palindrome import is_palindrome\n self.assertFalse(is_palindrome(\"asdfddsa\"))\n self.assertFalse(is_palindrome(\"hello world\"))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
<|reserved_special_token_0|>
kwargs_default.update(plot_timey_kwargs_default)
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)
<|reserved_special_token_0|>
kw.update(get_quantity_group(kw.groupname, magnetism))
<|reserved_special_token_0|>
if kw.imag:
take_real = False
else:
take_real = True
<|reserved_special_token_0|>
if kw.rad:
datatype = 'mertimerad'
dataname = 'mertimerad'
sampleaxis = di_grid['rr'] / rsun
if kw.mtimerad:
kw.rad = True
radlevs = get_slice_levels(dirname)
datatype = 'mtimerad'
dataname = 'mtimerad'
radlevs = get_slice_levels(dirname)
sampleaxis = radlevs.radius / rsun
datatype += '_mval%03i' % kw.mval
if 'groupname' in kw:
dataname += '_' + kw.groupname
if not kw.rcut is None:
dataname += '_rcut%0.3f' % kw.rcut
if kw.the_file is None:
kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',
dataname)
print('reading ' + kw.the_file)
<|reserved_special_token_0|>
if kw.mtimerad:
samplevals_avail = di['latvals']
else:
samplevals_avail = di['samplevals']
<|reserved_special_token_0|>
times /= time_unit
if not kw.ntot == 'full':
print('ntot = %i' % kw.ntot)
print('before thin_data: len(times) = %i' % len(times))
times = thin_data(times, kw.ntot)
iters = thin_data(iters, kw.ntot)
vals = thin_data(vals, kw.ntot)
print('after thin_data: len(times) = %i' % len(times))
<|reserved_special_token_0|>
if not isall(kw.samplevals):
kw.samplevals = make_array(kw.samplevals)
<|reserved_special_token_0|>
for qval in kw.qvals:
qind = np.argmin(np.abs(qvals_avail - qval))
if take_real:
the_term = np.real(vals[:, :, :, qind])
else:
the_term = np.imag(vals[:, :, :, qind])
terms.append(the_term)
<|reserved_special_token_0|>
if 'ycut' in clas:
margin_right_inches *= 2
<|reserved_special_token_0|>
if not kw.samplevals is None:
if isall(kw.samplevals):
kw.isamplevals = np.arange(len(samplevals_avail))
else:
kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
for i in range(len(kw.samplevals)):
kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.
samplevals[i]))
for isampleval in kw.isamplevals:
if not kw.shav:
sampleval = samplevals_avail[isampleval]
axislabel = 'latitude (deg)'
samplelabel = '$r/R_\\odot$' + ' = %.3f' % sampleval
position_tag = '_rval%.3f' % sampleval
if kw.rad:
axislabel = '$r/R_\\odot$'
samplelabel = 'lat = ' + lat_format(sampleval)
position_tag = '_lat' + lat_format(sampleval)
maintitle = dirname_stripped
maintitle += '\n' + samplelabel
maintitle += '\nmval = %03i' % kw.mval
if kw.navg is None:
maintitle += '\nt_avg = none'
else:
averaging_time = (times[-1] - times[0]) / len(times) * kw.navg
maintitle += '\n' + 't_avg = %.1f Prot' % averaging_time
print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))
fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=
sub_width_inches, sub_height_inches=sub_height_inches,
margin_left_inches=margin_left_inches, margin_right_inches=
margin_right_inches, margin_top_inches=margin_top_inches,
margin_bottom_inches=margin_bottom_inches)
for iplot in range(nplots):
ax = axs[iplot, 0]
if kw.rad:
field = terms[iplot][:, isampleval, :]
else:
field = terms[iplot][:, :, isampleval]
plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)
ax.set_title(kw.titles[iplot], fontsize=fontsize)
if iplot == nplots - 1:
ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
if iplot == nplots // 2:
ax.set_ylabel(axislabel, fontsize=fontsize)
fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,
fontsize=fontsize, ha='left', va='bottom')
if clas0['saveplot']:
basename = dataname + '_%08i_%08i' % (iter1, iter2)
plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
if take_real:
realtag = '_real'
else:
realtag = '_imag'
savename = basename + position_tag + realtag + '.png'
print('saving', plotdir + '/' + savename)
plt.savefig(plotdir + '/' + savename, dpi=200)
if clas0['showplot'] and len(kw.isamplevals) == 1:
plt.show()
else:
plt.close()
print('=======================================')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
<|reserved_special_token_0|>
fontsize = default_titlesize
args = sys.argv
clas0, clas = read_clas(args)
dirname = clas0['dirname']
dirname_stripped = strip_dirname(dirname)
magnetism = clas0['magnetism']
kwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,
'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,
'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})
kwargs_default.update(plot_timey_kwargs_default)
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)
kw = update_dict(kwargs_default, clas)
kw.update(get_quantity_group(kw.groupname, magnetism))
kw = update_dict(kw, clas)
kw_plot_timey = update_dict(plot_timey_kwargs_default, clas)
if kw.imag:
take_real = False
else:
take_real = True
time_unit, time_label, rotation, simple_label = get_time_unit(dirname)
di_grid = get_grid_info(dirname)
datatype = 'mertimelat'
dataname = 'mertimelat'
sampleaxis = di_grid['tt_lat']
if kw.rad:
datatype = 'mertimerad'
dataname = 'mertimerad'
sampleaxis = di_grid['rr'] / rsun
if kw.mtimerad:
kw.rad = True
radlevs = get_slice_levels(dirname)
datatype = 'mtimerad'
dataname = 'mtimerad'
radlevs = get_slice_levels(dirname)
sampleaxis = radlevs.radius / rsun
datatype += '_mval%03i' % kw.mval
if 'groupname' in kw:
dataname += '_' + kw.groupname
if not kw.rcut is None:
dataname += '_rcut%0.3f' % kw.rcut
if kw.the_file is None:
kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',
dataname)
print('reading ' + kw.the_file)
di = get_dict(kw.the_file)
vals = di['vals']
times = di['times']
iters = di['iters']
qvals_avail = np.array(di['qvals'])
if kw.mtimerad:
samplevals_avail = di['latvals']
else:
samplevals_avail = di['samplevals']
iter1, iter2 = get_iters_from_file(kw.the_file)
times /= time_unit
if not kw.ntot == 'full':
print('ntot = %i' % kw.ntot)
print('before thin_data: len(times) = %i' % len(times))
times = thin_data(times, kw.ntot)
iters = thin_data(iters, kw.ntot)
vals = thin_data(vals, kw.ntot)
print('after thin_data: len(times) = %i' % len(times))
kw.qvals = make_array(kw.qvals)
kw.isamplevals = make_array(kw.isamplevals)
if not isall(kw.samplevals):
kw.samplevals = make_array(kw.samplevals)
terms = []
for qval in kw.qvals:
qind = np.argmin(np.abs(qvals_avail - qval))
if take_real:
the_term = np.real(vals[:, :, :, qind])
else:
the_term = np.imag(vals[:, :, :, qind])
terms.append(the_term)
sub_width_inches = 7.5
sub_height_inches = 2.0
margin_bottom_inches = 3 / 8
margin_top_inches = 1
margin_left_inches = 5 / 8
margin_right_inches = 7 / 8
if 'ycut' in clas:
margin_right_inches *= 2
nplots = len(terms)
if not kw.samplevals is None:
if isall(kw.samplevals):
kw.isamplevals = np.arange(len(samplevals_avail))
else:
kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
for i in range(len(kw.samplevals)):
kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.
samplevals[i]))
for isampleval in kw.isamplevals:
if not kw.shav:
sampleval = samplevals_avail[isampleval]
axislabel = 'latitude (deg)'
samplelabel = '$r/R_\\odot$' + ' = %.3f' % sampleval
position_tag = '_rval%.3f' % sampleval
if kw.rad:
axislabel = '$r/R_\\odot$'
samplelabel = 'lat = ' + lat_format(sampleval)
position_tag = '_lat' + lat_format(sampleval)
maintitle = dirname_stripped
maintitle += '\n' + samplelabel
maintitle += '\nmval = %03i' % kw.mval
if kw.navg is None:
maintitle += '\nt_avg = none'
else:
averaging_time = (times[-1] - times[0]) / len(times) * kw.navg
maintitle += '\n' + 't_avg = %.1f Prot' % averaging_time
print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))
fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=
sub_width_inches, sub_height_inches=sub_height_inches,
margin_left_inches=margin_left_inches, margin_right_inches=
margin_right_inches, margin_top_inches=margin_top_inches,
margin_bottom_inches=margin_bottom_inches)
for iplot in range(nplots):
ax = axs[iplot, 0]
if kw.rad:
field = terms[iplot][:, isampleval, :]
else:
field = terms[iplot][:, :, isampleval]
plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)
ax.set_title(kw.titles[iplot], fontsize=fontsize)
if iplot == nplots - 1:
ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
if iplot == nplots // 2:
ax.set_ylabel(axislabel, fontsize=fontsize)
fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,
fontsize=fontsize, ha='left', va='bottom')
if clas0['saveplot']:
basename = dataname + '_%08i_%08i' % (iter1, iter2)
plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
if take_real:
realtag = '_real'
else:
realtag = '_imag'
savename = basename + position_tag + realtag + '.png'
print('saving', plotdir + '/' + savename)
plt.savefig(plotdir + '/' + savename, dpi=200)
if clas0['showplot'] and len(kw.isamplevals) == 1:
plt.show()
else:
plt.close()
print('=======================================')
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import sys, os
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
from common import *
from cla_util import *
from plotcommon import *
from timey_util import *
fontsize = default_titlesize
args = sys.argv
clas0, clas = read_clas(args)
dirname = clas0['dirname']
dirname_stripped = strip_dirname(dirname)
magnetism = clas0['magnetism']
kwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,
'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,
'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})
kwargs_default.update(plot_timey_kwargs_default)
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)
kw = update_dict(kwargs_default, clas)
kw.update(get_quantity_group(kw.groupname, magnetism))
kw = update_dict(kw, clas)
kw_plot_timey = update_dict(plot_timey_kwargs_default, clas)
if kw.imag:
take_real = False
else:
take_real = True
time_unit, time_label, rotation, simple_label = get_time_unit(dirname)
di_grid = get_grid_info(dirname)
datatype = 'mertimelat'
dataname = 'mertimelat'
sampleaxis = di_grid['tt_lat']
if kw.rad:
datatype = 'mertimerad'
dataname = 'mertimerad'
sampleaxis = di_grid['rr'] / rsun
if kw.mtimerad:
kw.rad = True
radlevs = get_slice_levels(dirname)
datatype = 'mtimerad'
dataname = 'mtimerad'
radlevs = get_slice_levels(dirname)
sampleaxis = radlevs.radius / rsun
datatype += '_mval%03i' % kw.mval
if 'groupname' in kw:
dataname += '_' + kw.groupname
if not kw.rcut is None:
dataname += '_rcut%0.3f' % kw.rcut
if kw.the_file is None:
kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',
dataname)
print('reading ' + kw.the_file)
di = get_dict(kw.the_file)
vals = di['vals']
times = di['times']
iters = di['iters']
qvals_avail = np.array(di['qvals'])
if kw.mtimerad:
samplevals_avail = di['latvals']
else:
samplevals_avail = di['samplevals']
iter1, iter2 = get_iters_from_file(kw.the_file)
times /= time_unit
if not kw.ntot == 'full':
print('ntot = %i' % kw.ntot)
print('before thin_data: len(times) = %i' % len(times))
times = thin_data(times, kw.ntot)
iters = thin_data(iters, kw.ntot)
vals = thin_data(vals, kw.ntot)
print('after thin_data: len(times) = %i' % len(times))
kw.qvals = make_array(kw.qvals)
kw.isamplevals = make_array(kw.isamplevals)
if not isall(kw.samplevals):
kw.samplevals = make_array(kw.samplevals)
terms = []
for qval in kw.qvals:
qind = np.argmin(np.abs(qvals_avail - qval))
if take_real:
the_term = np.real(vals[:, :, :, qind])
else:
the_term = np.imag(vals[:, :, :, qind])
terms.append(the_term)
sub_width_inches = 7.5
sub_height_inches = 2.0
margin_bottom_inches = 3 / 8
margin_top_inches = 1
margin_left_inches = 5 / 8
margin_right_inches = 7 / 8
if 'ycut' in clas:
margin_right_inches *= 2
nplots = len(terms)
if not kw.samplevals is None:
if isall(kw.samplevals):
kw.isamplevals = np.arange(len(samplevals_avail))
else:
kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
for i in range(len(kw.samplevals)):
kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.
samplevals[i]))
for isampleval in kw.isamplevals:
if not kw.shav:
sampleval = samplevals_avail[isampleval]
axislabel = 'latitude (deg)'
samplelabel = '$r/R_\\odot$' + ' = %.3f' % sampleval
position_tag = '_rval%.3f' % sampleval
if kw.rad:
axislabel = '$r/R_\\odot$'
samplelabel = 'lat = ' + lat_format(sampleval)
position_tag = '_lat' + lat_format(sampleval)
maintitle = dirname_stripped
maintitle += '\n' + samplelabel
maintitle += '\nmval = %03i' % kw.mval
if kw.navg is None:
maintitle += '\nt_avg = none'
else:
averaging_time = (times[-1] - times[0]) / len(times) * kw.navg
maintitle += '\n' + 't_avg = %.1f Prot' % averaging_time
print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))
fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=
sub_width_inches, sub_height_inches=sub_height_inches,
margin_left_inches=margin_left_inches, margin_right_inches=
margin_right_inches, margin_top_inches=margin_top_inches,
margin_bottom_inches=margin_bottom_inches)
for iplot in range(nplots):
ax = axs[iplot, 0]
if kw.rad:
field = terms[iplot][:, isampleval, :]
else:
field = terms[iplot][:, :, isampleval]
plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)
ax.set_title(kw.titles[iplot], fontsize=fontsize)
if iplot == nplots - 1:
ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
if iplot == nplots // 2:
ax.set_ylabel(axislabel, fontsize=fontsize)
fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,
fontsize=fontsize, ha='left', va='bottom')
if clas0['saveplot']:
basename = dataname + '_%08i_%08i' % (iter1, iter2)
plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
if take_real:
realtag = '_real'
else:
realtag = '_imag'
savename = basename + position_tag + realtag + '.png'
print('saving', plotdir + '/' + savename)
plt.savefig(plotdir + '/' + savename, dpi=200)
if clas0['showplot'] and len(kw.isamplevals) == 1:
plt.show()
else:
plt.close()
print('=======================================')
<|reserved_special_token_1|>
# Author: Loren Matilsky
# Date created: 03/02/2019
# Plots time-"y" strips (time-latitude or time-radius) of a single azimuthal
# wavenumber (mval) read from pre-computed trace files, one figure per
# requested sample level. Depends on the project's common/cla_util/
# plotcommon/timey_util helpers (star-imported below).
import matplotlib.pyplot as plt
import numpy as np
import sys, os
# Project modules live under the directories named by these environment vars.
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
from common import *
from cla_util import *
from plotcommon import *
from timey_util import *
# Set fontsize
fontsize = default_titlesize
# Read command-line arguments (CLAs)
args = sys.argv
clas0, clas = read_clas(args)
dirname = clas0['dirname']
dirname_stripped = strip_dirname(dirname)
# See if magnetism is "on"
magnetism = clas0['magnetism']
# defaults
kwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False, 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None, 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})
kwargs_default.update(plot_timey_kwargs_default)
# check for bad keys
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)
# overwrite defaults
kw = update_dict(kwargs_default, clas)
# add in groupname keys
# NOTE(review): kw.qvals, kw.titles, kw.navg and kw.shav (all used below) are
# not in kwargs_default -- presumably supplied here by get_quantity_group;
# confirm against its return value.
kw.update(get_quantity_group(kw.groupname, magnetism))
# user may have wanted to change some groupname keys
kw = update_dict(kw, clas)
kw_plot_timey = update_dict(plot_timey_kwargs_default, clas)
# check if we want the real or imaginary vals
if kw.imag:
    take_real = False
else:
    take_real = True
# baseline time unit
time_unit, time_label, rotation, simple_label = get_time_unit(dirname)
# get grid info
di_grid = get_grid_info(dirname)
# Default dataset: time-latitude traces (sample levels are radii).
datatype = 'mertimelat'
dataname = 'mertimelat'
sampleaxis = di_grid['tt_lat']
if kw.rad:
    # Time-radius traces instead (sample levels are latitudes).
    datatype = 'mertimerad'
    dataname = 'mertimerad'
    sampleaxis = di_grid['rr']/rsun
if kw.mtimerad:
    # mtimerad implies the radial layout and uses slice-level radii.
    kw.rad = True
    radlevs = get_slice_levels(dirname)
    datatype = 'mtimerad'
    dataname = 'mtimerad'
    radlevs = get_slice_levels(dirname)  # NOTE(review): duplicate of the call two lines up
    sampleaxis = radlevs.radius/rsun
datatype += '_mval%03i' %kw.mval
if 'groupname' in kw:
    dataname += '_' + kw.groupname
if not kw.rcut is None:
    dataname += '_rcut%0.3f' %kw.rcut
#dataname += clas0['tag']
# get data
if kw.the_file is None:
    # Fall back to the trace file covering the widest iteration range.
    kw.the_file = get_widest_range_file(clas0['datadir'] +\
            datatype + '/', dataname)
# Read in the data
print ('reading ' + kw.the_file)
di = get_dict(kw.the_file)
vals = di['vals']  # 4-D trace array; last axis indexes quantities (see slicing below)
times = di['times']
iters = di['iters']
qvals_avail = np.array(di['qvals'])
if kw.mtimerad:
    samplevals_avail = di['latvals']
else:
    samplevals_avail = di['samplevals']
iter1, iter2 = get_iters_from_file(kw.the_file)
times /= time_unit  # express times in units of time_unit (e.g. rotations)
# maybe thin data
if not kw.ntot == 'full':
    print ("ntot = %i" %kw.ntot)
    print ("before thin_data: len(times) = %i" %len(times))
    times = thin_data(times, kw.ntot)
    iters = thin_data(iters, kw.ntot)
    vals = thin_data(vals, kw.ntot)
    print ("after thin_data: len(times) = %i" %len(times))
# these all need to be arrays
kw.qvals = make_array(kw.qvals)
kw.isamplevals = make_array(kw.isamplevals)
if not isall(kw.samplevals):
    kw.samplevals = make_array(kw.samplevals)
# get raw traces of desired variables
terms = []
for qval in kw.qvals:
    # Nearest available quantity code to the one requested.
    qind = np.argmin(np.abs(qvals_avail - qval))
    if take_real:
        the_term = np.real(vals[:, :, :, qind])
    else:
        the_term = np.imag(vals[:, :, :, qind])
    terms.append(the_term)
# set figure dimensions
sub_width_inches = 7.5
sub_height_inches = 2.0
margin_bottom_inches = 3/8 # space for x-axis label
margin_top_inches = 1
margin_left_inches = 5/8 # space for latitude label
margin_right_inches = 7/8 # space for colorbar
if 'ycut' in clas:
    # Extra room on the right (presumably a second colorbar when the
    # y-axis is cut -- confirm against plot_timey's ycut handling).
    margin_right_inches *= 2
nplots = len(terms)
# determine desired levels to plot
if not kw.samplevals is None: # isamplevals being set indirectly
    # check for special 'all' option
    if isall(kw.samplevals):
        kw.isamplevals = np.arange(len(samplevals_avail))
    else:
        # Map each requested physical value to the nearest available index.
        kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
        for i in range(len(kw.samplevals)):
            kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.samplevals[i]))
# Loop over the desired levels and save plots
for isampleval in kw.isamplevals:
    # NOTE(review): if kw.shav is truthy, sampleval is never assigned and the
    # label/print lines below raise NameError on the first iteration -- verify
    # intended behavior of the shav option.
    if not kw.shav:
        sampleval = samplevals_avail[isampleval]
    # set some labels
    axislabel = 'latitude (deg)'
    samplelabel = r'$r/R_\odot$' + ' = %.3f' %sampleval
    position_tag = '_rval%.3f' %sampleval
    if kw.rad:
        axislabel = r'$r/R_\odot$'
        samplelabel = 'lat = ' + lat_format(sampleval)
        position_tag = '_lat' + lat_format(sampleval)
    # Put some useful information on the title
    maintitle = dirname_stripped
    maintitle += '\n' + samplelabel
    maintitle += '\nmval = %03i' %kw.mval
    if kw.navg is None:
        maintitle += '\nt_avg = none'
    else:
        # Averaging window = navg sampling intervals, in time_unit units.
        averaging_time = (times[-1] - times[0])/len(times)*kw.navg
        maintitle += '\n' + ('t_avg = %.1f Prot' %averaging_time)
    print('plotting sampleval = %0.3f (i = %02i)' %(sampleval, isampleval))
    # make plot
    fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=sub_width_inches, sub_height_inches=sub_height_inches, margin_left_inches=margin_left_inches, margin_right_inches=margin_right_inches, margin_top_inches=margin_top_inches, margin_bottom_inches=margin_bottom_inches)
    for iplot in range(nplots):
        ax = axs[iplot, 0]
        # Middle axis is the sample axis for rad layout, last axis otherwise.
        if kw.rad:
            field = terms[iplot][:, isampleval, :]
        else:
            field = terms[iplot][:, :, isampleval]
        plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)
        # title the plot
        ax.set_title(kw.titles[iplot], fontsize=fontsize)
        # Turn the x tick labels off for the top strips
        #if iplot < nplots - 1:
        #    ax.set_xticklabels([])
        # Put time label on bottom strip
        if iplot == nplots - 1:
            ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
        # Put ylabel on middle strip
        if iplot == nplots//2:
            ax.set_ylabel(axislabel, fontsize=fontsize)
    fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle, fontsize=fontsize, ha='left', va='bottom')
    # Save the plot
    if clas0['saveplot']:
        # Make appropriate file name to save
        # save the figure
        basename = dataname + '_%08i_%08i' %(iter1, iter2)
        plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
        if take_real:
            realtag = '_real'
        else:
            realtag = '_imag'
        savename = basename + position_tag + realtag + '.png'
        print ("saving", plotdir + '/' + savename)
        plt.savefig(plotdir + '/' + savename, dpi=200)
    # Show the plot if only plotting at one latitude
    if clas0['showplot'] and len(kw.isamplevals) == 1:
        plt.show()
    else:
        plt.close()
    print ("=======================================")
|
flexible
|
{
"blob_id": "97a059d6d34b924a0512ebe6ff5ab1d5ccc072d5",
"index": 8966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\n<mask token>\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\n<mask token>\nkw.update(get_quantity_group(kw.groupname, magnetism))\n<mask token>\nif kw.imag:\n take_real = False\nelse:\n take_real = True\n<mask token>\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\n<mask token>\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals']\n<mask token>\ntimes /= time_unit\nif not kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\n<mask token>\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\n<mask token>\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\n<mask token>\nif 'ycut' in clas:\n margin_right_inches *= 2\n<mask token>\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = 
np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = '$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag 
+ '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-3": "<mask token>\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\n<mask token>\nfontsize = default_titlesize\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\nmagnetism = clas0['magnetism']\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,\n 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,\n 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\nkw = update_dict(kwargs_default, clas)\nkw.update(get_quantity_group(kw.groupname, magnetism))\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\nif kw.imag:\n take_real = False\nelse:\n take_real = True\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\ndi_grid = get_grid_info(dirname)\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals']\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\nif not 
kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3 / 8\nmargin_top_inches = 1\nmargin_left_inches = 5 / 8\nmargin_right_inches = 7 / 8\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = '$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, 
sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\nfrom common import *\nfrom cla_util import *\nfrom plotcommon import *\nfrom timey_util import *\nfontsize = default_titlesize\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\nmagnetism = clas0['magnetism']\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,\n 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,\n 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\nkw = update_dict(kwargs_default, clas)\nkw.update(get_quantity_group(kw.groupname, magnetism))\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\nif kw.imag:\n take_real = False\nelse:\n take_real = True\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\ndi_grid = get_grid_info(dirname)\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = 
di['latvals']\nelse:\n samplevals_avail = di['samplevals']\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\nif not kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3 / 8\nmargin_top_inches = 1\nmargin_left_inches = 5 / 8\nmargin_right_inches = 7 / 8\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = '$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i 
= %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-5": "# Author: Loren Matilsky\n# Date created: 03/02/2019\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\nfrom common import *\nfrom cla_util import *\nfrom plotcommon import *\nfrom timey_util import *\n\n# Set fontsize\nfontsize = default_titlesize\n\n# Read command-line arguments (CLAs)\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\n# See if magnetism is \"on\"\nmagnetism = clas0['magnetism']\n\n# defaults\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False, 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None, 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\n\n# check for bad keys\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\n\n# overwrite defaults\nkw = update_dict(kwargs_default, clas)\n# add in groupname keys\nkw.update(get_quantity_group(kw.groupname, magnetism))\n# user may have wanted to change some groupname keys\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\n\n# check if we want the real or imaginary vals\nif kw.imag:\n take_real = False\nelse:\n take_real = True\n\n# baseline time unit\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\n\n# get grid info\ndi_grid = get_grid_info(dirname)\n\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr']/rsun\n\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius/rsun\n\ndatatype += '_mval%03i' %kw.mval\n\nif 'groupname' in kw:\n dataname += '_' + 
kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' %kw.rcut\n\n#dataname += clas0['tag']\n# get data\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] +\\\n datatype + '/', dataname)\n\n# Read in the data\nprint ('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals'] \n\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\n\n# maybe thin data\nif not kw.ntot == 'full':\n print (\"ntot = %i\" %kw.ntot)\n print (\"before thin_data: len(times) = %i\" %len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print (\"after thin_data: len(times) = %i\" %len(times))\n\n# these all need to be arrays\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\n\n# get raw traces of desired variables\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\n\n# set figure dimensions\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3/8 # space for x-axis label\nmargin_top_inches = 1\nmargin_left_inches = 5/8 # space for latitude label\nmargin_right_inches = 7/8 # space for colorbar\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\n\n# determine desired levels to plot\nif not kw.samplevals is None: # isamplevals being set indirectly\n # check for special 'all' option\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = 
np.argmin(np.abs(samplevals_avail - kw.samplevals[i]))\n\n# Loop over the desired levels and save plots\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n\n # set some labels \n axislabel = 'latitude (deg)'\n samplelabel = r'$r/R_\\odot$' + ' = %.3f' %sampleval\n position_tag = '_rval%.3f' %sampleval\n if kw.rad:\n axislabel = r'$r/R_\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n\n # Put some useful information on the title\n maintitle = dirname_stripped \n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' %kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0])/len(times)*kw.navg\n maintitle += '\\n' + ('t_avg = %.1f Prot' %averaging_time)\n\n print('plotting sampleval = %0.3f (i = %02i)' %(sampleval, isampleval))\n \n # make plot\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=sub_width_inches, sub_height_inches=sub_height_inches, margin_left_inches=margin_left_inches, margin_right_inches=margin_right_inches, margin_top_inches=margin_top_inches, margin_bottom_inches=margin_bottom_inches)\n\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n \n # title the plot\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n\n # Turn the x tick labels off for the top strips\n #if iplot < nplots - 1:\n # ax.set_xticklabels([])\n # Put time label on bottom strip \n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n # Put ylabel on middle strip\n if iplot == nplots//2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle, fontsize=fontsize, ha='left', va='bottom')\n\n # Save the plot\n if clas0['saveplot']:\n # 
Make appropriate file name to save\n\n # save the figure\n basename = dataname + '_%08i_%08i' %(iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print (\"saving\", plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n\n # Show the plot if only plotting at one latitude\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print (\"=======================================\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def sound(cube):
sound = bytearray()
sound.append(2)
sound.append(9)
sound.append(255)
await cube.write_gatt_char(TOIO_SOUND_UUID, sound)
async def motor(cube):
motor = bytearray()
motor.append(2)
motor.append(1)
motor.append(1)
motor.append(16)
motor.append(2)
motor.append(1)
motor.append(16)
motor.append(64)
await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)
async def connect_to_cube(d):
print('try to connect %s' % d.address)
async with BleakClient(d.address) as cube:
connected = cube.is_connected
if not connected:
print('%s is not connected' % d.address)
return
print('%s connected' % d.address)
services = cube.services
for service in services:
print(service.uuid)
if service.uuid == TOIO_SERVICE_UUID:
cubes.append(cube)
print('toio core cube(%d): %s' % (len(cubes), connected))
print(' Address: ', d.address)
for char in service.characteristics:
print(' Characteristic: ', char)
await sound(cube)
await motor(cube)
async def search_cube():
devices = await BleakScanner.discover(timeout=5.0)
for i, d in enumerate(devices):
print('device %d' % i)
try:
await connect_to_cube(d)
except exc.BleakError as e:
print(e)
except AttributeError as e:
pass
async def main(argv):
print('search toio core cube')
await search_cube()
if len(cubes) == 0:
print('sorry, no cubes are found')
return 0
if __name__ == '__main__':
asyncio.run(main(sys.argv))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TOIO_SERVICE_UUID = '10B20100-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_BATTERY_UUID = '10B20108-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_BUTTON_UUID = '10B20107-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_SOUND_UUID = '10B20104-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_MOTOR_UUID = '10B20102-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
cubes = []
async def sound(cube):
sound = bytearray()
sound.append(2)
sound.append(9)
sound.append(255)
await cube.write_gatt_char(TOIO_SOUND_UUID, sound)
async def motor(cube):
motor = bytearray()
motor.append(2)
motor.append(1)
motor.append(1)
motor.append(16)
motor.append(2)
motor.append(1)
motor.append(16)
motor.append(64)
await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)
async def connect_to_cube(d):
print('try to connect %s' % d.address)
async with BleakClient(d.address) as cube:
connected = cube.is_connected
if not connected:
print('%s is not connected' % d.address)
return
print('%s connected' % d.address)
services = cube.services
for service in services:
print(service.uuid)
if service.uuid == TOIO_SERVICE_UUID:
cubes.append(cube)
print('toio core cube(%d): %s' % (len(cubes), connected))
print(' Address: ', d.address)
for char in service.characteristics:
print(' Characteristic: ', char)
await sound(cube)
await motor(cube)
async def search_cube():
devices = await BleakScanner.discover(timeout=5.0)
for i, d in enumerate(devices):
print('device %d' % i)
try:
await connect_to_cube(d)
except exc.BleakError as e:
print(e)
except AttributeError as e:
pass
async def main(argv):
print('search toio core cube')
await search_cube()
if len(cubes) == 0:
print('sorry, no cubes are found')
return 0
if __name__ == '__main__':
asyncio.run(main(sys.argv))
<|reserved_special_token_1|>
import sys
import asyncio
from bleak import BleakScanner
from bleak import BleakClient
from bleak import exc
TOIO_SERVICE_UUID = '10B20100-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_BATTERY_UUID = '10B20108-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_BUTTON_UUID = '10B20107-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_SOUND_UUID = '10B20104-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
TOIO_MOTOR_UUID = '10B20102-5B3B-4571-9508-CF3EFCD7BBAE'.lower()
cubes = []
async def sound(cube):
sound = bytearray()
sound.append(2)
sound.append(9)
sound.append(255)
await cube.write_gatt_char(TOIO_SOUND_UUID, sound)
async def motor(cube):
motor = bytearray()
motor.append(2)
motor.append(1)
motor.append(1)
motor.append(16)
motor.append(2)
motor.append(1)
motor.append(16)
motor.append(64)
await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)
async def connect_to_cube(d):
print('try to connect %s' % d.address)
async with BleakClient(d.address) as cube:
connected = cube.is_connected
if not connected:
print('%s is not connected' % d.address)
return
print('%s connected' % d.address)
services = cube.services
for service in services:
print(service.uuid)
if service.uuid == TOIO_SERVICE_UUID:
cubes.append(cube)
print('toio core cube(%d): %s' % (len(cubes), connected))
print(' Address: ', d.address)
for char in service.characteristics:
print(' Characteristic: ', char)
await sound(cube)
await motor(cube)
async def search_cube():
devices = await BleakScanner.discover(timeout=5.0)
for i, d in enumerate(devices):
print('device %d' % i)
try:
await connect_to_cube(d)
except exc.BleakError as e:
print(e)
except AttributeError as e:
pass
async def main(argv):
print('search toio core cube')
await search_cube()
if len(cubes) == 0:
print('sorry, no cubes are found')
return 0
if __name__ == '__main__':
asyncio.run(main(sys.argv))
<|reserved_special_token_1|>
#!/usr/bin/env python3
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import asyncio
from bleak import BleakScanner
from bleak import BleakClient
from bleak import exc
TOIO_SERVICE_UUID = "10B20100-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_BATTERY_UUID = "10B20108-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_BUTTON_UUID = "10B20107-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_SOUND_UUID = "10B20104-5B3B-4571-9508-CF3EFCD7BBAE".lower()
TOIO_MOTOR_UUID = "10B20102-5B3B-4571-9508-CF3EFCD7BBAE".lower()
cubes = []
async def sound(cube):
sound = bytearray()
sound.append(0x02)
sound.append(9)
sound.append(0xff)
await cube.write_gatt_char(TOIO_SOUND_UUID, sound)
async def motor(cube):
motor = bytearray()
motor.append(0x02)
motor.append(0x01)
motor.append(0x01)
motor.append(0x10)
motor.append(0x02)
motor.append(0x01)
motor.append(0x10)
motor.append(0x40)
await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)
async def connect_to_cube(d):
print('try to connect %s' % d.address)
async with BleakClient(d.address) as cube:
connected = cube.is_connected
if not connected:
print('%s is not connected' % d.address)
return
print('%s connected' % d.address)
services = cube.services
for service in services:
print(service.uuid)
if service.uuid == TOIO_SERVICE_UUID:
cubes.append(cube)
print('toio core cube(%d): %s' % (len(cubes), connected))
print(' Address: ', d.address)
for char in service.characteristics:
print(' Characteristic: ', char)
await sound(cube)
await motor(cube)
async def search_cube():
devices = await BleakScanner.discover(timeout=5.0)
for i, d in enumerate(devices):
print('device %d' % i)
try:
await connect_to_cube(d)
except exc.BleakError as e:
print(e)
except AttributeError as e:
pass
async def main(argv):
    """Entry point: discover toio cubes; return 0 when none are found.

    *argv* is accepted for symmetry with sys.argv but is not used.
    """
    print('search toio core cube')
    await search_cube()
    if not cubes:
        print('sorry, no cubes are found')
        return 0
if __name__ == '__main__':
    # Run the async entry point, forwarding command-line arguments.
    asyncio.run(main(sys.argv))
|
flexible
|
{
"blob_id": "923a433a3a04a8538b43d162d17d379daab4698a",
"index": 7753,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def sound(cube):\n sound = bytearray()\n sound.append(2)\n sound.append(9)\n sound.append(255)\n await cube.write_gatt_char(TOIO_SOUND_UUID, sound)\n\n\nasync def motor(cube):\n motor = bytearray()\n motor.append(2)\n motor.append(1)\n motor.append(1)\n motor.append(16)\n motor.append(2)\n motor.append(1)\n motor.append(16)\n motor.append(64)\n await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)\n\n\nasync def connect_to_cube(d):\n print('try to connect %s' % d.address)\n async with BleakClient(d.address) as cube:\n connected = cube.is_connected\n if not connected:\n print('%s is not connected' % d.address)\n return\n print('%s connected' % d.address)\n services = cube.services\n for service in services:\n print(service.uuid)\n if service.uuid == TOIO_SERVICE_UUID:\n cubes.append(cube)\n print('toio core cube(%d): %s' % (len(cubes), connected))\n print(' Address: ', d.address)\n for char in service.characteristics:\n print(' Characteristic: ', char)\n await sound(cube)\n await motor(cube)\n\n\nasync def search_cube():\n devices = await BleakScanner.discover(timeout=5.0)\n for i, d in enumerate(devices):\n print('device %d' % i)\n try:\n await connect_to_cube(d)\n except exc.BleakError as e:\n print(e)\n except AttributeError as e:\n pass\n\n\nasync def main(argv):\n print('search toio core cube')\n await search_cube()\n if len(cubes) == 0:\n print('sorry, no cubes are found')\n return 0\n\n\nif __name__ == '__main__':\n asyncio.run(main(sys.argv))\n",
"step-3": "<mask token>\nTOIO_SERVICE_UUID = '10B20100-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_BATTERY_UUID = '10B20108-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_BUTTON_UUID = '10B20107-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_SOUND_UUID = '10B20104-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_MOTOR_UUID = '10B20102-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\ncubes = []\n\n\nasync def sound(cube):\n sound = bytearray()\n sound.append(2)\n sound.append(9)\n sound.append(255)\n await cube.write_gatt_char(TOIO_SOUND_UUID, sound)\n\n\nasync def motor(cube):\n motor = bytearray()\n motor.append(2)\n motor.append(1)\n motor.append(1)\n motor.append(16)\n motor.append(2)\n motor.append(1)\n motor.append(16)\n motor.append(64)\n await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)\n\n\nasync def connect_to_cube(d):\n print('try to connect %s' % d.address)\n async with BleakClient(d.address) as cube:\n connected = cube.is_connected\n if not connected:\n print('%s is not connected' % d.address)\n return\n print('%s connected' % d.address)\n services = cube.services\n for service in services:\n print(service.uuid)\n if service.uuid == TOIO_SERVICE_UUID:\n cubes.append(cube)\n print('toio core cube(%d): %s' % (len(cubes), connected))\n print(' Address: ', d.address)\n for char in service.characteristics:\n print(' Characteristic: ', char)\n await sound(cube)\n await motor(cube)\n\n\nasync def search_cube():\n devices = await BleakScanner.discover(timeout=5.0)\n for i, d in enumerate(devices):\n print('device %d' % i)\n try:\n await connect_to_cube(d)\n except exc.BleakError as e:\n print(e)\n except AttributeError as e:\n pass\n\n\nasync def main(argv):\n print('search toio core cube')\n await search_cube()\n if len(cubes) == 0:\n print('sorry, no cubes are found')\n return 0\n\n\nif __name__ == '__main__':\n asyncio.run(main(sys.argv))\n",
"step-4": "import sys\nimport asyncio\nfrom bleak import BleakScanner\nfrom bleak import BleakClient\nfrom bleak import exc\nTOIO_SERVICE_UUID = '10B20100-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_BATTERY_UUID = '10B20108-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_BUTTON_UUID = '10B20107-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_SOUND_UUID = '10B20104-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\nTOIO_MOTOR_UUID = '10B20102-5B3B-4571-9508-CF3EFCD7BBAE'.lower()\ncubes = []\n\n\nasync def sound(cube):\n sound = bytearray()\n sound.append(2)\n sound.append(9)\n sound.append(255)\n await cube.write_gatt_char(TOIO_SOUND_UUID, sound)\n\n\nasync def motor(cube):\n motor = bytearray()\n motor.append(2)\n motor.append(1)\n motor.append(1)\n motor.append(16)\n motor.append(2)\n motor.append(1)\n motor.append(16)\n motor.append(64)\n await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)\n\n\nasync def connect_to_cube(d):\n print('try to connect %s' % d.address)\n async with BleakClient(d.address) as cube:\n connected = cube.is_connected\n if not connected:\n print('%s is not connected' % d.address)\n return\n print('%s connected' % d.address)\n services = cube.services\n for service in services:\n print(service.uuid)\n if service.uuid == TOIO_SERVICE_UUID:\n cubes.append(cube)\n print('toio core cube(%d): %s' % (len(cubes), connected))\n print(' Address: ', d.address)\n for char in service.characteristics:\n print(' Characteristic: ', char)\n await sound(cube)\n await motor(cube)\n\n\nasync def search_cube():\n devices = await BleakScanner.discover(timeout=5.0)\n for i, d in enumerate(devices):\n print('device %d' % i)\n try:\n await connect_to_cube(d)\n except exc.BleakError as e:\n print(e)\n except AttributeError as e:\n pass\n\n\nasync def main(argv):\n print('search toio core cube')\n await search_cube()\n if len(cubes) == 0:\n print('sorry, no cubes are found')\n return 0\n\n\nif __name__ == '__main__':\n asyncio.run(main(sys.argv))\n",
"step-5": "#!/usr/bin/env python3\r\n\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\r\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n\r\nimport sys\r\nimport asyncio\r\nfrom bleak import BleakScanner\r\nfrom bleak import BleakClient\r\nfrom bleak import exc\r\n\r\nTOIO_SERVICE_UUID = \"10B20100-5B3B-4571-9508-CF3EFCD7BBAE\".lower()\r\nTOIO_BATTERY_UUID = \"10B20108-5B3B-4571-9508-CF3EFCD7BBAE\".lower()\r\nTOIO_BUTTON_UUID = \"10B20107-5B3B-4571-9508-CF3EFCD7BBAE\".lower()\r\nTOIO_SOUND_UUID = \"10B20104-5B3B-4571-9508-CF3EFCD7BBAE\".lower()\r\nTOIO_MOTOR_UUID = \"10B20102-5B3B-4571-9508-CF3EFCD7BBAE\".lower()\r\n\r\ncubes = []\r\n\r\nasync def sound(cube):\r\n sound = bytearray()\r\n sound.append(0x02)\r\n sound.append(9)\r\n sound.append(0xff)\r\n await cube.write_gatt_char(TOIO_SOUND_UUID, sound)\r\n\r\n\r\nasync def motor(cube):\r\n motor = bytearray()\r\n motor.append(0x02)\r\n motor.append(0x01)\r\n motor.append(0x01)\r\n motor.append(0x10)\r\n motor.append(0x02)\r\n motor.append(0x01)\r\n motor.append(0x10)\r\n motor.append(0x40)\r\n await cube.write_gatt_char(TOIO_MOTOR_UUID, motor)\r\n\r\nasync def connect_to_cube(d):\r\n print('try to connect %s' % d.address)\r\n async with BleakClient(d.address) as cube:\r\n connected = cube.is_connected\r\n if not 
connected:\r\n print('%s is not connected' % d.address)\r\n return\r\n print('%s connected' % d.address)\r\n services = cube.services\r\n for service in services:\r\n print(service.uuid)\r\n if service.uuid == TOIO_SERVICE_UUID:\r\n cubes.append(cube)\r\n print('toio core cube(%d): %s' % (len(cubes), connected))\r\n print(' Address: ', d.address)\r\n for char in service.characteristics:\r\n print(' Characteristic: ', char)\r\n await sound(cube)\r\n await motor(cube)\r\n\r\nasync def search_cube():\r\n devices = await BleakScanner.discover(timeout=5.0)\r\n for i, d in enumerate(devices):\r\n print('device %d' % i)\r\n try:\r\n await connect_to_cube(d)\r\n except exc.BleakError as e:\r\n print(e)\r\n except AttributeError as e:\r\n pass\r\n\r\n\r\nasync def main(argv):\r\n print('search toio core cube')\r\n await search_cube()\r\n if len(cubes) == 0:\r\n print('sorry, no cubes are found')\r\n return 0\r\n\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main(sys.argv))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Caesar-shift every character of `stringy` by `k`, appending to `s`:
# letters wrap within their own case, other characters pass through.
for ch in stringy:
    code = ord(ch)
    if 65 <= code <= 90:        # uppercase A-Z
        s += chr((code + k - 65) % 26 + 65)
    elif 97 <= code <= 122:     # lowercase a-z
        s += chr((code + k - 97) % 26 + 97)
    else:
        s += ch
print(s)
<|reserved_special_token_1|>
# Caesar cipher: read the declared length (unused), the text, and the
# shift amount; print the rotated text.
n = int(input())
stringy = input()
k = int(input())
encoded = []
for ch in stringy:
    code = ord(ch)
    if 65 <= code <= 90:        # uppercase A-Z: rotate within A-Z
        encoded.append(chr((code + k - 65) % 26 + 65))
    elif 97 <= code <= 122:     # lowercase a-z: rotate within a-z
        encoded.append(chr((code + k - 97) % 26 + 97))
    else:                       # non-letters are copied unchanged
        encoded.append(ch)
print(''.join(encoded))
<|reserved_special_token_1|>
# https://www.hackerrank.com/challenges/caesar-cipher-1/problem
n = int(input())       # declared length of the string (not needed here)
stringy = input()      # plaintext
k = int(input())       # rotation amount


def _rot(ch):
    """Rotate one character by k, leaving non-letters unchanged."""
    if 'A' <= ch <= 'Z':
        return chr((ord(ch) + k - 65) % 26 + 65)
    if 'a' <= ch <= 'z':
        return chr((ord(ch) + k - 97) % 26 + 97)
    return ch


print(''.join(_rot(ch) for ch in stringy))
|
flexible
|
{
"blob_id": "acf787885834961a71fb2655b9d8a1eb026942c7",
"index": 4089,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n",
"step-3": "n = int(input())\nstringy = input()\nk = int(input())\ns = ''\nfor i in stringy:\n if ord(i) >= 65 and ord(i) <= 90:\n temp = (ord(i) + k - 65) % 26\n s += chr(temp + 65)\n elif ord(i) >= 97 and ord(i) <= 122:\n temp = (ord(i) + k - 97) % 26\n s += chr(temp + 97)\n else:\n s += i\nprint(s)\n",
"step-4": "#https://www.hackerrank.com/challenges/caesar-cipher-1/problem\n\nn=int(input())\nstringy=input()\nk=int(input())\ns=\"\"\nfor i in stringy:\n if ord(i)>=65 and ord(i)<=90:\n temp=(ord(i)+k-65)%26\n s+=chr(temp+65)\n elif ord(i)>=97 and ord(i)<=122:\n temp=(ord(i)+k-97)%26\n s+=chr(temp+97)\n else:\n s+=i\nprint(s)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from threading import Thread
import time
def sleeping():
    """Background task: wait five seconds, then report completion."""
    time.sleep(5)
    print('Ended')


# Start the worker as a daemon so it cannot keep the interpreter alive;
# the main thread then sleeps slightly longer (5.5 s) so the worker's
# final print happens before the script exits.
worker = Thread(target=sleeping, daemon=True)
worker.start()
print('Hello world')
time.sleep(5.5)
|
normal
|
{
"blob_id": "628fdf848079d0ecf5bf4f5bd46e07ad6cd10358",
"index": 5070,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n",
"step-4": "from threading import Thread\nimport time\n\n\ndef sleeping():\n time.sleep(5)\n print('Ended')\n\n\nThread(target=sleeping, daemon=True).start()\nprint('Hello world')\ntime.sleep(5.5)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.