code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
# encoding=utf8
import json
import time
from util import http_util
available = 1
disable = 0
# 交易所域名
REST_URL = "https://api.bithumb.com"
PAYMENT_CURRENCY_BTC = "BTC"
PAYMENT_CURRENCY_KRW = "KRW"
# 成功码
SUCCESS_CODE = "0000"
# 没有交易对的错误码
NO_SYMBOL_CODE = ["5600", "5500"]
# bithumb提现的最小值,获取地址:https://apidocs.bithumb.com/docs/withdrawal_coin
MIN_WITHDRAWAL_QUANTITY = """BTC: 0.002 | ETH: 0.01 | LTC: 0.1 | ETC: 0.1 | XRP: 21 | BCH: 0.002 | BTG: 0.002 | EOS: 0.5 | ICX: 4 | TRX: 150 | ELF: 10 | OMG: 2 | GLM: 30 | ZIL: 30 | POWR: 23 | LRC: 42 | EOSDAC: 10 | STEEM: 0.01 | STRAX: 0.2 | ZRX: 6 | REP: 0.08 | XEM: 4 | SNT: 23 | ADA: 1 | BAT: 3 | WTC: 1.4 | LOOM: 22 | WAVES: 2 | TRUE: 10 | LINK: 0.11 | MEETONE: 10 | HORUS: 10 | ADD: 100 | RNT: 300 | ENJ: 2 | VET: 200 | MTL: 0.9 | CHL: 100 | BLACK: 10 | ATD: 100 | IOST: 1000 | TMTG: 360 | QKC: 2000 | HDAC: 200 | WET: 840 | AMO: 7000 | BSV: 0.002 | BXA: 15 | DAC: 670 | ORBS: 24 | TFUEL: 10 | VALOR: 5 | CON: 460 | ANKR: 27 | MIX: 360 | LAMB: 40 | CRO: 17 | FX: 10 | CHR: 12 | MBL: 3500 | MXC: 72 | WIN: 1 | DVP: 56 | FCT: 20 | FNB: 460 | TRV: 100 | PCM: 170 | DAD: 12 | AOA: 560 | XSR: 1300 | WOM: 15 | SOC: 360 | EM: 1000 | QBZ: 340 | BOA: 10 | FLETA: 180 | SXP: 0.9 | COS: 97 | APIX: 36 | EL: 170 | BASIC: 460 | HIVE: 18 | XPR: 800 | FIT: 720 | EGG: 360 | BORA: 17 | ARPA: 35 | APM: 100 | CKB: 170 | AERGO: 13 | ANW: 28 | CENNZ: 60 | EVZ: 44 | MCI: 170 | SRM: 0.7 | QTCON: 56 | UNI: 0.13 | YFI: 0.0001 | UMA: 0.17 | AAVE: 0.01 | COMP: 0.01 | REN: 5 | BAL: 0.08 | RSR: 59 | NMR: 0.07 | RLC: 2 | UOS: 9 | SAND: 7 | CVT: 18 | STPT: 63 | GOM2: 320 | RINGX: 28 | BEL: 0.8 | DVC: 11 | OBSR: 170 | ORC: 2 | POLA: 15 | AWO: 270 | ADP: 59 | DVI: 9 | IBP: 25 | GHX: 5 | MIR: 0.5 | CBK: 0.5 | ONX: 5 | MVC: 25 | BLY: 25 | WOZX: 3 | ANV: 2 | GRT: 3 | MM: 4 | BIOT: 77 | XNO: 12 | SNX: 0.2 | RAI: 2 | COLA: 5 | NU: 8 | OXT: 6 | LINA: 34 | MAP: 34 | AQT: 0.6 | WIKEN: 130 | MANA: 5 | LPT: 0.15 | MKR: 0.0014 | SUSHI: 0.23 | NSBT: 0.3 | DON: 2 | ASM: 9 | PUNDIX: 0.7 | CELR: 50 | ARW: 0.5 | MSB: 10 | RLY: 5 | OCEAN: 4 | BFC: 25 | ALICE: 0.4 | CHZ: 9 | BCD: 2 | GXC: 4 | BTT: 5000 | VSYS: 100 | IPX: 80 | WICC: 32 | ONT: 7 | LUNA: 12 | NEWS: 10 | AION: 35 | META: 300 | ONG: 25 | ALGO: 4 | JST: 250 | XTZ: 1.2 | MLK: 20 | WEMIX: 40 | DOT: 1.5 | SUN: 1 | ATOM: 1 | SSX: 42 | TEMCO: 2000 | LZM: 25 | HIBS: 
250 | BURGER: 0.9"""
all_coin = []
currency_min_withdrawal_quantity = dict()
for q in str.split(MIN_WITHDRAWAL_QUANTITY, "|"):
c_list = q.strip().split(":")
currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].strip())
all_coin.append(c_list[0].strip())
def get_all_coin_btc_ticker():
"""
:return: 获取所有币的使用btc购买时的ticker
"""
result = []
for coin in all_coin:
if coin != PAYMENT_CURRENCY_BTC:
r = get_ticker(coin, PAYMENT_CURRENCY_BTC)
if r is not None:
result.append({"coin": coin, "r": r})
time.sleep(0.05)
return result
def get_ticker(order_currency, payment_currency):
"""
获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker
https://api.bithumb.com/public/ticker/BTC_KRW
:return:
{
"status":"0000",
"data":{"opening_price":"63241000","closing_price":"63651000","min_price":"62944000","max_price":"65351000","units_traded":"2715.2835537","acc_trade_value":"173294332105.4152","prev_closing_price":"63239000","units_traded_24H":"3471.39085837","acc_trade_value_24H":"221565547379.3369","fluctate_24H":"418000","fluctate_rate_24H":"0.66","date":"1619687597790"}
}
"""
url = REST_URL + "/public/ticker/{}_{}".format(order_currency, payment_currency)
return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)
def get_min_withdrawal(coin):
"""
获取指定的资产的提现最小值
:param coin: 提现的资产,如BTC、ETH
:return:
传入的单元最少可以提现多少
"""
return currency_min_withdrawal_quantity[coin]
def get_asset_status(coin):
url = REST_URL + "/public/assetsstatus/{}".format(coin)
"""
获取资产的状态信息:https://apidocs.bithumb.com/docs/assets_status
https://api.bithumb.com/public/assetsstatus/{order_currency}
:param coin: 币
:return:
传入的币的状态,第一个值表示是否可以存入,第二个值表示是否可以提取
{
"status" : "0000",
"data" :
[
{
"deposit_status" : 1,
"withdrawal_status" : 0
}
]
}
"""
data = check_and_get_data(http_util.get(url), None)
return data["deposit_status"] == available, data["withdrawal_status"] == available
def check_and_get_data(response, ignore_codes):
body = json.loads(response.text)
status = body["status"]
if SUCCESS_CODE != status:
if status in ignore_codes:
print("返回的status={}编码不是成功,message={}".format(status, body["message"]))
return None
else:
raise BaseException("返回的status={}编码不是成功,message={}".format(status, body["message"]))
return body["data"]
|
normal
|
{
"blob_id": "f268dc4c2ae2c17e7d0d3921d29e6b952fc63c7d",
"index": 9802,
"step-1": "<mask token>\n\n\ndef get_ticker(order_currency, payment_currency):\n \"\"\"\n 获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker\n https://api.bithumb.com/public/ticker/BTC_KRW\n :return:\n {\n \"status\":\"0000\",\n \"data\":{\"opening_price\":\"63241000\",\"closing_price\":\"63651000\",\"min_price\":\"62944000\",\"max_price\":\"65351000\",\"units_traded\":\"2715.2835537\",\"acc_trade_value\":\"173294332105.4152\",\"prev_closing_price\":\"63239000\",\"units_traded_24H\":\"3471.39085837\",\"acc_trade_value_24H\":\"221565547379.3369\",\"fluctate_24H\":\"418000\",\"fluctate_rate_24H\":\"0.66\",\"date\":\"1619687597790\"}\n }\n \"\"\"\n url = REST_URL + '/public/ticker/{}_{}'.format(order_currency,\n payment_currency)\n return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)\n\n\ndef get_min_withdrawal(coin):\n \"\"\"\n 获取指定的资产的提现最小值\n :param coin: 提现的资产,如BTC、ETH\n :return:\n 传入的单元最少可以提现多少\n \"\"\"\n return currency_min_withdrawal_quantity[coin]\n\n\n<mask token>\n\n\ndef check_and_get_data(response, ignore_codes):\n body = json.loads(response.text)\n status = body['status']\n if SUCCESS_CODE != status:\n if status in ignore_codes:\n print('返回的status={}编码不是成功,message={}'.format(status, body[\n 'message']))\n return None\n else:\n raise BaseException('返回的status={}编码不是成功,message={}'.format(\n status, body['message']))\n return body['data']\n",
"step-2": "<mask token>\nfor q in str.split(MIN_WITHDRAWAL_QUANTITY, '|'):\n c_list = q.strip().split(':')\n currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].\n strip())\n all_coin.append(c_list[0].strip())\n\n\ndef get_all_coin_btc_ticker():\n \"\"\"\n :return: 获取所有币的使用btc购买时的ticker\n \"\"\"\n result = []\n for coin in all_coin:\n if coin != PAYMENT_CURRENCY_BTC:\n r = get_ticker(coin, PAYMENT_CURRENCY_BTC)\n if r is not None:\n result.append({'coin': coin, 'r': r})\n time.sleep(0.05)\n return result\n\n\ndef get_ticker(order_currency, payment_currency):\n \"\"\"\n 获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker\n https://api.bithumb.com/public/ticker/BTC_KRW\n :return:\n {\n \"status\":\"0000\",\n \"data\":{\"opening_price\":\"63241000\",\"closing_price\":\"63651000\",\"min_price\":\"62944000\",\"max_price\":\"65351000\",\"units_traded\":\"2715.2835537\",\"acc_trade_value\":\"173294332105.4152\",\"prev_closing_price\":\"63239000\",\"units_traded_24H\":\"3471.39085837\",\"acc_trade_value_24H\":\"221565547379.3369\",\"fluctate_24H\":\"418000\",\"fluctate_rate_24H\":\"0.66\",\"date\":\"1619687597790\"}\n }\n \"\"\"\n url = REST_URL + '/public/ticker/{}_{}'.format(order_currency,\n payment_currency)\n return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)\n\n\ndef get_min_withdrawal(coin):\n \"\"\"\n 获取指定的资产的提现最小值\n :param coin: 提现的资产,如BTC、ETH\n :return:\n 传入的单元最少可以提现多少\n \"\"\"\n return currency_min_withdrawal_quantity[coin]\n\n\ndef get_asset_status(coin):\n url = REST_URL + '/public/assetsstatus/{}'.format(coin)\n \"\"\"\n 获取资产的状态信息:https://apidocs.bithumb.com/docs/assets_status\n https://api.bithumb.com/public/assetsstatus/{order_currency}\n :param coin: 币\n :return:\n 传入的币的状态,第一个值表示是否可以存入,第二个值表示是否可以提取\n {\n \"status\" : \"0000\",\n \"data\" :\n [\n {\n \"deposit_status\" : 1,\n \"withdrawal_status\" : 0\n }\n ]\n }\n \"\"\"\n data = check_and_get_data(http_util.get(url), None)\n return data['deposit_status'] == available, 
data['withdrawal_status'\n ] == available\n\n\ndef check_and_get_data(response, ignore_codes):\n body = json.loads(response.text)\n status = body['status']\n if SUCCESS_CODE != status:\n if status in ignore_codes:\n print('返回的status={}编码不是成功,message={}'.format(status, body[\n 'message']))\n return None\n else:\n raise BaseException('返回的status={}编码不是成功,message={}'.format(\n status, body['message']))\n return body['data']\n",
"step-3": "<mask token>\navailable = 1\ndisable = 0\nREST_URL = 'https://api.bithumb.com'\nPAYMENT_CURRENCY_BTC = 'BTC'\nPAYMENT_CURRENCY_KRW = 'KRW'\nSUCCESS_CODE = '0000'\nNO_SYMBOL_CODE = ['5600', '5500']\nMIN_WITHDRAWAL_QUANTITY = (\n 'BTC: 0.002 | ETH: 0.01 | LTC: 0.1 | ETC: 0.1 | XRP: 21 | BCH: 0.002 | BTG: 0.002 | EOS: 0.5 | ICX: 4 | TRX: 150 | ELF: 10 | OMG: 2 | GLM: 30 | ZIL: 30 | POWR: 23 | LRC: 42 | EOSDAC: 10 | STEEM: 0.01 | STRAX: 0.2 | ZRX: 6 | REP: 0.08 | XEM: 4 | SNT: 23 | ADA: 1 | BAT: 3 | WTC: 1.4 | LOOM: 22 | WAVES: 2 | TRUE: 10 | LINK: 0.11 | MEETONE: 10 | HORUS: 10 | ADD: 100 | RNT: 300 | ENJ: 2 | VET: 200 | MTL: 0.9 | CHL: 100 | BLACK: 10 | ATD: 100 | IOST: 1000 | TMTG: 360 | QKC: 2000 | HDAC: 200 | WET: 840 | AMO: 7000 | BSV: 0.002 | BXA: 15 | DAC: 670 | ORBS: 24 | TFUEL: 10 | VALOR: 5 | CON: 460 | ANKR: 27 | MIX: 360 | LAMB: 40 | CRO: 17 | FX: 10 | CHR: 12 | MBL: 3500 | MXC: 72 | WIN: 1 | DVP: 56 | FCT: 20 | FNB: 460 | TRV: 100 | PCM: 170 | DAD: 12 | AOA: 560 | XSR: 1300 | WOM: 15 | SOC: 360 | EM: 1000 | QBZ: 340 | BOA: 10 | FLETA: 180 | SXP: 0.9 | COS: 97 | APIX: 36 | EL: 170 | BASIC: 460 | HIVE: 18 | XPR: 800 | FIT: 720 | EGG: 360 | BORA: 17 | ARPA: 35 | APM: 100 | CKB: 170 | AERGO: 13 | ANW: 28 | CENNZ: 60 | EVZ: 44 | MCI: 170 | SRM: 0.7 | QTCON: 56 | UNI: 0.13 | YFI: 0.0001 | UMA: 0.17 | AAVE: 0.01 | COMP: 0.01 | REN: 5 | BAL: 0.08 | RSR: 59 | NMR: 0.07 | RLC: 2 | UOS: 9 | SAND: 7 | CVT: 18 | STPT: 63 | GOM2: 320 | RINGX: 28 | BEL: 0.8 | DVC: 11 | OBSR: 170 | ORC: 2 | POLA: 15 | AWO: 270 | ADP: 59 | DVI: 9 | IBP: 25 | GHX: 5 | MIR: 0.5 | CBK: 0.5 | ONX: 5 | MVC: 25 | BLY: 25 | WOZX: 3 | ANV: 2 | GRT: 3 | MM: 4 | BIOT: 77 | XNO: 12 | SNX: 0.2 | RAI: 2 | COLA: 5 | NU: 8 | OXT: 6 | LINA: 34 | MAP: 34 | AQT: 0.6 | WIKEN: 130 | MANA: 5 | LPT: 0.15 | MKR: 0.0014 | SUSHI: 0.23 | NSBT: 0.3 | DON: 2 | ASM: 9 | PUNDIX: 0.7 | CELR: 50 | ARW: 0.5 | MSB: 10 | RLY: 5 | OCEAN: 4 | BFC: 25 | ALICE: 0.4 | CHZ: 9 | BCD: 2 | GXC: 4 | BTT: 5000 | VSYS: 100 
| IPX: 80 | WICC: 32 | ONT: 7 | LUNA: 12 | NEWS: 10 | AION: 35 | META: 300 | ONG: 25 | ALGO: 4 | JST: 250 | XTZ: 1.2 | MLK: 20 | WEMIX: 40 | DOT: 1.5 | SUN: 1 | ATOM: 1 | SSX: 42 | TEMCO: 2000 | LZM: 25 | HIBS: 250 | BURGER: 0.9'\n )\nall_coin = []\ncurrency_min_withdrawal_quantity = dict()\nfor q in str.split(MIN_WITHDRAWAL_QUANTITY, '|'):\n c_list = q.strip().split(':')\n currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].\n strip())\n all_coin.append(c_list[0].strip())\n\n\ndef get_all_coin_btc_ticker():\n \"\"\"\n :return: 获取所有币的使用btc购买时的ticker\n \"\"\"\n result = []\n for coin in all_coin:\n if coin != PAYMENT_CURRENCY_BTC:\n r = get_ticker(coin, PAYMENT_CURRENCY_BTC)\n if r is not None:\n result.append({'coin': coin, 'r': r})\n time.sleep(0.05)\n return result\n\n\ndef get_ticker(order_currency, payment_currency):\n \"\"\"\n 获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker\n https://api.bithumb.com/public/ticker/BTC_KRW\n :return:\n {\n \"status\":\"0000\",\n \"data\":{\"opening_price\":\"63241000\",\"closing_price\":\"63651000\",\"min_price\":\"62944000\",\"max_price\":\"65351000\",\"units_traded\":\"2715.2835537\",\"acc_trade_value\":\"173294332105.4152\",\"prev_closing_price\":\"63239000\",\"units_traded_24H\":\"3471.39085837\",\"acc_trade_value_24H\":\"221565547379.3369\",\"fluctate_24H\":\"418000\",\"fluctate_rate_24H\":\"0.66\",\"date\":\"1619687597790\"}\n }\n \"\"\"\n url = REST_URL + '/public/ticker/{}_{}'.format(order_currency,\n payment_currency)\n return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)\n\n\ndef get_min_withdrawal(coin):\n \"\"\"\n 获取指定的资产的提现最小值\n :param coin: 提现的资产,如BTC、ETH\n :return:\n 传入的单元最少可以提现多少\n \"\"\"\n return currency_min_withdrawal_quantity[coin]\n\n\ndef get_asset_status(coin):\n url = REST_URL + '/public/assetsstatus/{}'.format(coin)\n \"\"\"\n 获取资产的状态信息:https://apidocs.bithumb.com/docs/assets_status\n https://api.bithumb.com/public/assetsstatus/{order_currency}\n :param coin: 币\n 
:return:\n 传入的币的状态,第一个值表示是否可以存入,第二个值表示是否可以提取\n {\n \"status\" : \"0000\",\n \"data\" :\n [\n {\n \"deposit_status\" : 1,\n \"withdrawal_status\" : 0\n }\n ]\n }\n \"\"\"\n data = check_and_get_data(http_util.get(url), None)\n return data['deposit_status'] == available, data['withdrawal_status'\n ] == available\n\n\ndef check_and_get_data(response, ignore_codes):\n body = json.loads(response.text)\n status = body['status']\n if SUCCESS_CODE != status:\n if status in ignore_codes:\n print('返回的status={}编码不是成功,message={}'.format(status, body[\n 'message']))\n return None\n else:\n raise BaseException('返回的status={}编码不是成功,message={}'.format(\n status, body['message']))\n return body['data']\n",
"step-4": "import json\nimport time\nfrom util import http_util\navailable = 1\ndisable = 0\nREST_URL = 'https://api.bithumb.com'\nPAYMENT_CURRENCY_BTC = 'BTC'\nPAYMENT_CURRENCY_KRW = 'KRW'\nSUCCESS_CODE = '0000'\nNO_SYMBOL_CODE = ['5600', '5500']\nMIN_WITHDRAWAL_QUANTITY = (\n 'BTC: 0.002 | ETH: 0.01 | LTC: 0.1 | ETC: 0.1 | XRP: 21 | BCH: 0.002 | BTG: 0.002 | EOS: 0.5 | ICX: 4 | TRX: 150 | ELF: 10 | OMG: 2 | GLM: 30 | ZIL: 30 | POWR: 23 | LRC: 42 | EOSDAC: 10 | STEEM: 0.01 | STRAX: 0.2 | ZRX: 6 | REP: 0.08 | XEM: 4 | SNT: 23 | ADA: 1 | BAT: 3 | WTC: 1.4 | LOOM: 22 | WAVES: 2 | TRUE: 10 | LINK: 0.11 | MEETONE: 10 | HORUS: 10 | ADD: 100 | RNT: 300 | ENJ: 2 | VET: 200 | MTL: 0.9 | CHL: 100 | BLACK: 10 | ATD: 100 | IOST: 1000 | TMTG: 360 | QKC: 2000 | HDAC: 200 | WET: 840 | AMO: 7000 | BSV: 0.002 | BXA: 15 | DAC: 670 | ORBS: 24 | TFUEL: 10 | VALOR: 5 | CON: 460 | ANKR: 27 | MIX: 360 | LAMB: 40 | CRO: 17 | FX: 10 | CHR: 12 | MBL: 3500 | MXC: 72 | WIN: 1 | DVP: 56 | FCT: 20 | FNB: 460 | TRV: 100 | PCM: 170 | DAD: 12 | AOA: 560 | XSR: 1300 | WOM: 15 | SOC: 360 | EM: 1000 | QBZ: 340 | BOA: 10 | FLETA: 180 | SXP: 0.9 | COS: 97 | APIX: 36 | EL: 170 | BASIC: 460 | HIVE: 18 | XPR: 800 | FIT: 720 | EGG: 360 | BORA: 17 | ARPA: 35 | APM: 100 | CKB: 170 | AERGO: 13 | ANW: 28 | CENNZ: 60 | EVZ: 44 | MCI: 170 | SRM: 0.7 | QTCON: 56 | UNI: 0.13 | YFI: 0.0001 | UMA: 0.17 | AAVE: 0.01 | COMP: 0.01 | REN: 5 | BAL: 0.08 | RSR: 59 | NMR: 0.07 | RLC: 2 | UOS: 9 | SAND: 7 | CVT: 18 | STPT: 63 | GOM2: 320 | RINGX: 28 | BEL: 0.8 | DVC: 11 | OBSR: 170 | ORC: 2 | POLA: 15 | AWO: 270 | ADP: 59 | DVI: 9 | IBP: 25 | GHX: 5 | MIR: 0.5 | CBK: 0.5 | ONX: 5 | MVC: 25 | BLY: 25 | WOZX: 3 | ANV: 2 | GRT: 3 | MM: 4 | BIOT: 77 | XNO: 12 | SNX: 0.2 | RAI: 2 | COLA: 5 | NU: 8 | OXT: 6 | LINA: 34 | MAP: 34 | AQT: 0.6 | WIKEN: 130 | MANA: 5 | LPT: 0.15 | MKR: 0.0014 | SUSHI: 0.23 | NSBT: 0.3 | DON: 2 | ASM: 9 | PUNDIX: 0.7 | CELR: 50 | ARW: 0.5 | MSB: 10 | RLY: 5 | OCEAN: 4 | BFC: 25 | ALICE: 0.4 | CHZ: 9 | 
BCD: 2 | GXC: 4 | BTT: 5000 | VSYS: 100 | IPX: 80 | WICC: 32 | ONT: 7 | LUNA: 12 | NEWS: 10 | AION: 35 | META: 300 | ONG: 25 | ALGO: 4 | JST: 250 | XTZ: 1.2 | MLK: 20 | WEMIX: 40 | DOT: 1.5 | SUN: 1 | ATOM: 1 | SSX: 42 | TEMCO: 2000 | LZM: 25 | HIBS: 250 | BURGER: 0.9'\n )\nall_coin = []\ncurrency_min_withdrawal_quantity = dict()\nfor q in str.split(MIN_WITHDRAWAL_QUANTITY, '|'):\n c_list = q.strip().split(':')\n currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].\n strip())\n all_coin.append(c_list[0].strip())\n\n\ndef get_all_coin_btc_ticker():\n \"\"\"\n :return: 获取所有币的使用btc购买时的ticker\n \"\"\"\n result = []\n for coin in all_coin:\n if coin != PAYMENT_CURRENCY_BTC:\n r = get_ticker(coin, PAYMENT_CURRENCY_BTC)\n if r is not None:\n result.append({'coin': coin, 'r': r})\n time.sleep(0.05)\n return result\n\n\ndef get_ticker(order_currency, payment_currency):\n \"\"\"\n 获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker\n https://api.bithumb.com/public/ticker/BTC_KRW\n :return:\n {\n \"status\":\"0000\",\n \"data\":{\"opening_price\":\"63241000\",\"closing_price\":\"63651000\",\"min_price\":\"62944000\",\"max_price\":\"65351000\",\"units_traded\":\"2715.2835537\",\"acc_trade_value\":\"173294332105.4152\",\"prev_closing_price\":\"63239000\",\"units_traded_24H\":\"3471.39085837\",\"acc_trade_value_24H\":\"221565547379.3369\",\"fluctate_24H\":\"418000\",\"fluctate_rate_24H\":\"0.66\",\"date\":\"1619687597790\"}\n }\n \"\"\"\n url = REST_URL + '/public/ticker/{}_{}'.format(order_currency,\n payment_currency)\n return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)\n\n\ndef get_min_withdrawal(coin):\n \"\"\"\n 获取指定的资产的提现最小值\n :param coin: 提现的资产,如BTC、ETH\n :return:\n 传入的单元最少可以提现多少\n \"\"\"\n return currency_min_withdrawal_quantity[coin]\n\n\ndef get_asset_status(coin):\n url = REST_URL + '/public/assetsstatus/{}'.format(coin)\n \"\"\"\n 获取资产的状态信息:https://apidocs.bithumb.com/docs/assets_status\n 
https://api.bithumb.com/public/assetsstatus/{order_currency}\n :param coin: 币\n :return:\n 传入的币的状态,第一个值表示是否可以存入,第二个值表示是否可以提取\n {\n \"status\" : \"0000\",\n \"data\" :\n [\n {\n \"deposit_status\" : 1,\n \"withdrawal_status\" : 0\n }\n ]\n }\n \"\"\"\n data = check_and_get_data(http_util.get(url), None)\n return data['deposit_status'] == available, data['withdrawal_status'\n ] == available\n\n\ndef check_and_get_data(response, ignore_codes):\n body = json.loads(response.text)\n status = body['status']\n if SUCCESS_CODE != status:\n if status in ignore_codes:\n print('返回的status={}编码不是成功,message={}'.format(status, body[\n 'message']))\n return None\n else:\n raise BaseException('返回的status={}编码不是成功,message={}'.format(\n status, body['message']))\n return body['data']\n",
"step-5": "# encoding=utf8\nimport json\nimport time\n\nfrom util import http_util\n\navailable = 1\ndisable = 0\n\n# 交易所域名\nREST_URL = \"https://api.bithumb.com\"\n\nPAYMENT_CURRENCY_BTC = \"BTC\"\nPAYMENT_CURRENCY_KRW = \"KRW\"\n\n# 成功码\nSUCCESS_CODE = \"0000\"\n# 没有交易对的错误码\nNO_SYMBOL_CODE = [\"5600\", \"5500\"]\n\n# bithumb提现的最小值,获取地址:https://apidocs.bithumb.com/docs/withdrawal_coin\nMIN_WITHDRAWAL_QUANTITY = \"\"\"BTC: 0.002 | ETH: 0.01 | LTC: 0.1 | ETC: 0.1 | XRP: 21 | BCH: 0.002 | BTG: 0.002 | EOS: 0.5 | ICX: 4 | TRX: 150 | ELF: 10 | OMG: 2 | GLM: 30 | ZIL: 30 | POWR: 23 | LRC: 42 | EOSDAC: 10 | STEEM: 0.01 | STRAX: 0.2 | ZRX: 6 | REP: 0.08 | XEM: 4 | SNT: 23 | ADA: 1 | BAT: 3 | WTC: 1.4 | LOOM: 22 | WAVES: 2 | TRUE: 10 | LINK: 0.11 | MEETONE: 10 | HORUS: 10 | ADD: 100 | RNT: 300 | ENJ: 2 | VET: 200 | MTL: 0.9 | CHL: 100 | BLACK: 10 | ATD: 100 | IOST: 1000 | TMTG: 360 | QKC: 2000 | HDAC: 200 | WET: 840 | AMO: 7000 | BSV: 0.002 | BXA: 15 | DAC: 670 | ORBS: 24 | TFUEL: 10 | VALOR: 5 | CON: 460 | ANKR: 27 | MIX: 360 | LAMB: 40 | CRO: 17 | FX: 10 | CHR: 12 | MBL: 3500 | MXC: 72 | WIN: 1 | DVP: 56 | FCT: 20 | FNB: 460 | TRV: 100 | PCM: 170 | DAD: 12 | AOA: 560 | XSR: 1300 | WOM: 15 | SOC: 360 | EM: 1000 | QBZ: 340 | BOA: 10 | FLETA: 180 | SXP: 0.9 | COS: 97 | APIX: 36 | EL: 170 | BASIC: 460 | HIVE: 18 | XPR: 800 | FIT: 720 | EGG: 360 | BORA: 17 | ARPA: 35 | APM: 100 | CKB: 170 | AERGO: 13 | ANW: 28 | CENNZ: 60 | EVZ: 44 | MCI: 170 | SRM: 0.7 | QTCON: 56 | UNI: 0.13 | YFI: 0.0001 | UMA: 0.17 | AAVE: 0.01 | COMP: 0.01 | REN: 5 | BAL: 0.08 | RSR: 59 | NMR: 0.07 | RLC: 2 | UOS: 9 | SAND: 7 | CVT: 18 | STPT: 63 | GOM2: 320 | RINGX: 28 | BEL: 0.8 | DVC: 11 | OBSR: 170 | ORC: 2 | POLA: 15 | AWO: 270 | ADP: 59 | DVI: 9 | IBP: 25 | GHX: 5 | MIR: 0.5 | CBK: 0.5 | ONX: 5 | MVC: 25 | BLY: 25 | WOZX: 3 | ANV: 2 | GRT: 3 | MM: 4 | BIOT: 77 | XNO: 12 | SNX: 0.2 | RAI: 2 | COLA: 5 | NU: 8 | OXT: 6 | LINA: 34 | MAP: 34 | AQT: 0.6 | WIKEN: 130 | MANA: 5 | LPT: 0.15 | MKR: 0.0014 | 
SUSHI: 0.23 | NSBT: 0.3 | DON: 2 | ASM: 9 | PUNDIX: 0.7 | CELR: 50 | ARW: 0.5 | MSB: 10 | RLY: 5 | OCEAN: 4 | BFC: 25 | ALICE: 0.4 | CHZ: 9 | BCD: 2 | GXC: 4 | BTT: 5000 | VSYS: 100 | IPX: 80 | WICC: 32 | ONT: 7 | LUNA: 12 | NEWS: 10 | AION: 35 | META: 300 | ONG: 25 | ALGO: 4 | JST: 250 | XTZ: 1.2 | MLK: 20 | WEMIX: 40 | DOT: 1.5 | SUN: 1 | ATOM: 1 | SSX: 42 | TEMCO: 2000 | LZM: 25 | HIBS: 250 | BURGER: 0.9\"\"\"\n\nall_coin = []\n\ncurrency_min_withdrawal_quantity = dict()\n\nfor q in str.split(MIN_WITHDRAWAL_QUANTITY, \"|\"):\n c_list = q.strip().split(\":\")\n currency_min_withdrawal_quantity[c_list[0].strip()] = float(c_list[1].strip())\n all_coin.append(c_list[0].strip())\n\n\ndef get_all_coin_btc_ticker():\n \"\"\"\n :return: 获取所有币的使用btc购买时的ticker\n \"\"\"\n result = []\n for coin in all_coin:\n if coin != PAYMENT_CURRENCY_BTC:\n r = get_ticker(coin, PAYMENT_CURRENCY_BTC)\n if r is not None:\n result.append({\"coin\": coin, \"r\": r})\n time.sleep(0.05)\n return result\n\n\ndef get_ticker(order_currency, payment_currency):\n \"\"\"\n 获取指定交易对的ticker信息:https://apidocs.bithumb.com/docs/ticker\n https://api.bithumb.com/public/ticker/BTC_KRW\n :return:\n {\n \"status\":\"0000\",\n \"data\":{\"opening_price\":\"63241000\",\"closing_price\":\"63651000\",\"min_price\":\"62944000\",\"max_price\":\"65351000\",\"units_traded\":\"2715.2835537\",\"acc_trade_value\":\"173294332105.4152\",\"prev_closing_price\":\"63239000\",\"units_traded_24H\":\"3471.39085837\",\"acc_trade_value_24H\":\"221565547379.3369\",\"fluctate_24H\":\"418000\",\"fluctate_rate_24H\":\"0.66\",\"date\":\"1619687597790\"}\n }\n \"\"\"\n url = REST_URL + \"/public/ticker/{}_{}\".format(order_currency, payment_currency)\n return check_and_get_data(http_util.get(url), NO_SYMBOL_CODE)\n\n\ndef get_min_withdrawal(coin):\n \"\"\"\n 获取指定的资产的提现最小值\n :param coin: 提现的资产,如BTC、ETH\n :return:\n 传入的单元最少可以提现多少\n \"\"\"\n return currency_min_withdrawal_quantity[coin]\n\n\ndef get_asset_status(coin):\n url = REST_URL + 
\"/public/assetsstatus/{}\".format(coin)\n \"\"\"\n 获取资产的状态信息:https://apidocs.bithumb.com/docs/assets_status\n https://api.bithumb.com/public/assetsstatus/{order_currency}\n :param coin: 币\n :return:\n 传入的币的状态,第一个值表示是否可以存入,第二个值表示是否可以提取\n {\n \"status\" : \"0000\",\n \"data\" :\n [\n {\n \"deposit_status\" : 1,\n \"withdrawal_status\" : 0\n }\n ]\n }\n \"\"\"\n data = check_and_get_data(http_util.get(url), None)\n return data[\"deposit_status\"] == available, data[\"withdrawal_status\"] == available\n\n\ndef check_and_get_data(response, ignore_codes):\n body = json.loads(response.text)\n status = body[\"status\"]\n if SUCCESS_CODE != status:\n if status in ignore_codes:\n print(\"返回的status={}编码不是成功,message={}\".format(status, body[\"message\"]))\n return None\n else:\n raise BaseException(\"返回的status={}编码不是成功,message={}\".format(status, body[\"message\"]))\n return body[\"data\"]\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
return os.getenv('DB_HOST')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv(verbose=True)
<|reserved_special_token_0|>
if bool(os.getenv('IS_DEV')):
logger = logging.getLogger('orator.connection.queries')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(elapsed_time)sms %(query)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
@app.route('/')
def index():
return os.getenv('DB_HOST')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv(verbose=True)
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY')
app.config['JSON_SORT_KEYS'] = False
app.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':
'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),
'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),
'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}
app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')
app.config['JWT_TOKEN_LOCATION'] = ['headers']
db = Orator(app)
jwt = JWTManager(app)
if bool(os.getenv('IS_DEV')):
logger = logging.getLogger('orator.connection.queries')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(elapsed_time)sms %(query)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
@app.route('/')
def index():
return os.getenv('DB_HOST')
<|reserved_special_token_1|>
import os
import logging
from flask import Flask
from flask_orator import Orator
from flask_jwt_extended import JWTManager
from dotenv import load_dotenv
load_dotenv(verbose=True)
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY')
app.config['JSON_SORT_KEYS'] = False
app.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':
'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),
'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),
'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}
app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')
app.config['JWT_TOKEN_LOCATION'] = ['headers']
db = Orator(app)
jwt = JWTManager(app)
if bool(os.getenv('IS_DEV')):
logger = logging.getLogger('orator.connection.queries')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(elapsed_time)sms %(query)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
@app.route('/')
def index():
return os.getenv('DB_HOST')
<|reserved_special_token_1|>
import os
import logging
from flask import Flask
from flask_orator import Orator
from flask_jwt_extended import JWTManager
from dotenv import load_dotenv
load_dotenv(verbose=True)
app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY')
app.config['JSON_SORT_KEYS'] = False
app.config['ORATOR_DATABASES'] = {
'default': 'mysql',
'mysql': {
'driver': 'mysql',
'host': os.getenv('DB_HOST'),
'database': os.getenv('DB_NAME'),
'user': os.getenv('DB_USER'),
'password': os.getenv('DB_PASSWORD'),
'prefix': '',
'log_queries': bool(os.getenv('LOG_QUERIES'))
}
}
app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY') # Change this!
app.config['JWT_TOKEN_LOCATION'] = ['headers'] # headers', 'cookies', 'query_string', 'json'
db = Orator(app)
jwt = JWTManager(app)
if bool(os.getenv('IS_DEV')):
logger = logging.getLogger('orator.connection.queries')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(elapsed_time)sms %(query)s'
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
@app.route('/')
def index():
return os.getenv('DB_HOST')
|
flexible
|
{
"blob_id": "f20e2227821c43de17c116d8c11233eda53ab631",
"index": 9967,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-2": "<mask token>\nload_dotenv(verbose=True)\n<mask token>\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\n@app.route('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-3": "<mask token>\nload_dotenv(verbose=True)\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':\n 'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')\napp.config['JWT_TOKEN_LOCATION'] = ['headers']\ndb = Orator(app)\njwt = JWTManager(app)\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\n@app.route('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-4": "import os\nimport logging\nfrom flask import Flask\nfrom flask_orator import Orator\nfrom flask_jwt_extended import JWTManager\nfrom dotenv import load_dotenv\nload_dotenv(verbose=True)\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':\n 'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')\napp.config['JWT_TOKEN_LOCATION'] = ['headers']\ndb = Orator(app)\njwt = JWTManager(app)\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\n@app.route('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-5": "import os\nimport logging\nfrom flask import Flask\nfrom flask_orator import Orator\nfrom flask_jwt_extended import JWTManager\nfrom dotenv import load_dotenv\n\n\nload_dotenv(verbose=True)\n\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {\n 'default': 'mysql',\n 'mysql': {\n 'driver': 'mysql',\n 'host': os.getenv('DB_HOST'),\n 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'),\n 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '',\n 'log_queries': bool(os.getenv('LOG_QUERIES'))\n }\n}\n\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY') # Change this!\napp.config['JWT_TOKEN_LOCATION'] = ['headers'] # headers', 'cookies', 'query_string', 'json'\n\ndb = Orator(app)\njwt = JWTManager(app)\n\nif bool(os.getenv('IS_DEV')):\n\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n '%(elapsed_time)sms %(query)s'\n )\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n\n@app.route('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def puissance(x, n):
if n == 0:
return 1
else:
return x * puissance(x, n - 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def puissance(x, n):
if n == 0:
return 1
else:
return x * puissance(x, n - 1)
print(puissance(number, exposant))
<|reserved_special_token_1|>
number = int(input('entrez un entier:'))
exposant = int(input('entrez un exposant:'))
def puissance(x, n):
if n == 0:
return 1
else:
return x * puissance(x, n - 1)
print(puissance(number, exposant))
<|reserved_special_token_1|>
number = int(input("entrez un entier:"))
exposant = int(input("entrez un exposant:"))
def puissance(x, n):
if n == 0:
return 1
else:
return x * puissance(x, n-1)
print(puissance(number, exposant))
|
flexible
|
{
"blob_id": "beccae96b3b2c9dcd61bb538d07b85441a73662e",
"index": 9968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef puissance(x, n):\n if n == 0:\n return 1\n else:\n return x * puissance(x, n - 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef puissance(x, n):\n if n == 0:\n return 1\n else:\n return x * puissance(x, n - 1)\n\n\nprint(puissance(number, exposant))\n",
"step-4": "number = int(input('entrez un entier:'))\nexposant = int(input('entrez un exposant:'))\n\n\ndef puissance(x, n):\n if n == 0:\n return 1\n else:\n return x * puissance(x, n - 1)\n\n\nprint(puissance(number, exposant))\n",
"step-5": "number = int(input(\"entrez un entier:\"))\nexposant = int(input(\"entrez un exposant:\"))\n\ndef puissance(x, n):\n if n == 0:\n return 1\n else:\n return x * puissance(x, n-1)\n\n\nprint(puissance(number, exposant))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import sys
import socket
# Placeholder token that gets substituted with the real host IP address.
__target__ = '${EXTERNAL_HOST}'
# Maps each template file path to its destination; populated under __main__.
sources = {}
def process_the_source(fname, dest=None, host_ip=None, verbose=False):
    """Rewrite *fname* into *dest*, substituting the __target__ token.

    Every line of the source file is right-stripped, has the
    __target__ placeholder replaced by *host_ip*, and is written to
    *dest* one line at a time.
    """
    # NOTE(review): `verbose` is accepted but never used here — confirm intent.
    assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'
    with open(fname, 'r') as src:
        processed = [raw.rstrip().replace(__target__, host_ip) for raw in src]
    with open(dest, 'w') as out:
        for ln in processed:
            print(ln, file=out)
    assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'
if (__name__ == '__main__'):
    # Usage: script.py <root_dir> <host_ip>
    is_verbose = True
    root = sys.argv[1]
    host_ip = sys.argv[2]
    assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'
    assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'
    # The template .env at the root is rewritten into the code/ subdirectory.
    sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)
    if (is_verbose):
        print('BEGIN:')
    for s,d in sources.items():
        if (is_verbose):
            print('{} -> {}'.format(s, d))
        # Each source template must exist before substitution is attempted.
        assert os.path.exists(s) and os.path.isfile(s), 'Cannot find "{}" so cannot proceed.'.format(s)
        process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)
        if (is_verbose):
            print('END!!!')
    if (is_verbose):
        print()
        print('Done.')
|
normal
|
{
"blob_id": "d6af9a75fbe8bdf1a81a352cee71ac81fb373b86",
"index": 9926,
"step-1": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-3": "<mask token>\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-4": "import os\nimport sys\nimport socket\n__target__ = '${EXTERNAL_HOST}'\nsources = {}\n\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert os.path.exists(fname) and os.path.isfile(fname\n ), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert os.path.exists(dest) and os.path.isfile(dest\n ), 'Cannot proceed without the dest file in process_the_source().'\n\n\nif __name__ == '__main__':\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert len(host_ip) > 0, 'Cannot proceed without the host ip address.'\n assert os.path.exists(root) and os.path.isdir(root\n ), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n if is_verbose:\n print('BEGIN:')\n for s, d in sources.items():\n if is_verbose:\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s\n ), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if is_verbose:\n print('END!!!')\n if is_verbose:\n print()\n print('Done.')\n",
"step-5": "import os\nimport sys\nimport socket\n\n__target__ = '${EXTERNAL_HOST}'\n\nsources = {}\n\ndef process_the_source(fname, dest=None, host_ip=None, verbose=False):\n assert (os.path.exists(fname) and os.path.isfile(fname)), 'Cannot proceed without the fname in process_the_source().'\n the_lines = []\n with open(fname, 'r') as fIn:\n for line in fIn:\n l = line.rstrip()\n l = l.replace(__target__, host_ip)\n the_lines.append(l)\n with open(dest, 'w') as fOut:\n for l in the_lines:\n print(l, file=fOut)\n assert (os.path.exists(dest) and os.path.isfile(dest)), 'Cannot proceed without the dest file in process_the_source().'\n \n\nif (__name__ == '__main__'):\n is_verbose = True\n root = sys.argv[1]\n host_ip = sys.argv[2]\n assert (len(host_ip) > 0), 'Cannot proceed without the host ip address.'\n\n assert (os.path.exists(root) and os.path.isdir(root)), 'Cannot proceed without the root in process_the_source().'\n sources['{}/.env'.format(root)] = '{}/code/.env'.format(root)\n\n if (is_verbose):\n print('BEGIN:')\n for s,d in sources.items():\n if (is_verbose):\n print('{} -> {}'.format(s, d))\n assert os.path.exists(s) and os.path.isfile(s), 'Cannot find \"{}\" so cannot proceed.'.format(s)\n process_the_source(s, dest=d, host_ip=host_ip, verbose=is_verbose)\n if (is_verbose):\n print('END!!!')\n\n if (is_verbose):\n print()\n print('Done.')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Turn off bytecode generation
import sys
from asgiref.sync import sync_to_async
# NOTE(review): get_wsgi_application is imported but not used in this
# module — confirm whether it is needed.
from django.core.wsgi import get_wsgi_application
sys.dont_write_bytecode = True
# Django specific settings
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
import django
# django.setup() must run before importing ORM models below; the import
# order in this module is therefore load-bearing.
django.setup()
from db import models
def print_all_models():
    """Return a queryset of all Sample rows.

    NOTE(review): despite the name, nothing is printed — the queryset is
    returned to the caller.
    """
    return models.Sample.objects.all()
@sync_to_async
def _create_record(name):
    """Create a Sample row synchronously; wrapped for use from async code."""
    return models.Sample.objects.create(name=name)
async def create_record(name=None):
    """Async entry point that persists a Sample with the given name."""
    await _create_record(name)
|
normal
|
{
"blob_id": "4afb556ceca89eb90ba800db4f383afad1cd42a5",
"index": 3765,
"step-1": "<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-3": "<mask token>\nsys.dont_write_bytecode = True\n<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\n<mask token>\ndjango.setup()\n<mask token>\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-4": "import sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\nsys.dont_write_bytecode = True\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')\nimport django\ndjango.setup()\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-5": "# Turn off bytecode generation\nimport sys\nfrom asgiref.sync import sync_to_async\nfrom django.core.wsgi import get_wsgi_application\n\n\nsys.dont_write_bytecode = True\n\n# Django specific settings\nimport os\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"settings\")\nimport django\n\ndjango.setup()\n\nfrom db import models\n\n\ndef print_all_models():\n return models.Sample.objects.all()\n\n\n@sync_to_async\ndef _create_record(name):\n return models.Sample.objects.create(name=name)\n\n\nasync def create_record(name=None):\n await _create_record(name)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
""""""
import random
import nbformat
from textwrap import dedent
from pybryt.preprocessors import IntermediateVariablePreprocessor
def test_preprocessor():
    """Exercise IntermediateVariablePreprocessor on a single code cell.

    Builds a notebook cell mixing lambdas, boolean short-circuits and
    comprehensions, runs the preprocessor with a fixed random seed (the
    generated var_* names depend on the seed), and compares the rewritten
    source against the expected expansion.
    """
    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell(dedent("""\
        a = True
        b = False
        f = lambda x: not x

        g = f(a) + f(b)

        if f(a) and f(b):
            print("hi")

        if f(a) or f(b):
            print("hi")
        
        if a or b:
            print("bye")

        l = [f(i) for i in [a, b]]

        f = lambda x: [not i for i in l]
        l = [a, b]
        if all(f(l)):
            print("ok")
        else:
            l = any(f(l))
    """)))
    ivp = IntermediateVariablePreprocessor()
    # Seed the RNG so the preprocessor's generated variable names are
    # deterministic and match the expected source below.
    random.seed(42)
    nb = ivp.preprocess(nb)
    print(nb.cells[0].source)
    assert len(nb.cells) == 1
    assert nb.cells[0].source.strip() == dedent("""\
        a = True
        b = False
        f = (lambda x: (not x))
        var_HBRPOI = f(a)
        var_G8F1CB = f(b)
        g = (var_HBRPOI + var_G8F1CB)
        var_FNO6B9 = f(a)
        if (var_FNO6B9):
            var_M80O2R = f(b)
        if (var_FNO6B9 and var_M80O2R):
            var_AK1VRJ = print('hi')
            var_AK1VRJ
        var_NVGFYG = f(a)
        if (not (var_NVGFYG)):
            var_WWQC38 = f(b)
        if (var_NVGFYG or var_WWQC38):
            var_HYF9SX = print('hi')
            var_HYF9SX
        if (a or b):
            var_MECOSF = print('bye')
            var_MECOSF
        l = [f(i) for i in [a, b]]
        f = (lambda x: [(not i) for i in l])
        l = [a, b]
        var_KXWNRE = f(l)
        var_K8PK3Y = all(var_KXWNRE)
        if var_K8PK3Y:
            var_R9OUDO = print('ok')
            var_R9OUDO
        else:
            var_CUZREN = f(l)
            l = any(var_CUZREN)
    """).strip()
|
normal
|
{
"blob_id": "d9f08e770dacaa86a03d553afd78fdcd725efb62",
"index": 5204,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\n \"\"\" a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\"\n )))\n ivp = IntermediateVariablePreprocessor()\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\n \"\"\" a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\"\n ).strip()\n",
"step-3": "<mask token>\nimport random\nimport nbformat\nfrom textwrap import dedent\nfrom pybryt.preprocessors import IntermediateVariablePreprocessor\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\n \"\"\" a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\"\n )))\n ivp = IntermediateVariablePreprocessor()\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\n \"\"\" a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\"\n ).strip()\n",
"step-4": "\"\"\"\"\"\"\n\nimport random\nimport nbformat\n\nfrom textwrap import dedent\n\nfrom pybryt.preprocessors import IntermediateVariablePreprocessor\n\n\ndef test_preprocessor():\n \"\"\"\n \"\"\"\n nb = nbformat.v4.new_notebook()\n nb.cells.append(nbformat.v4.new_code_cell(dedent(\"\"\"\\\n a = True\n b = False\n f = lambda x: not x\n\n g = f(a) + f(b)\n\n if f(a) and f(b):\n print(\"hi\")\n\n if f(a) or f(b):\n print(\"hi\")\n \n if a or b:\n print(\"bye\")\n\n l = [f(i) for i in [a, b]]\n\n f = lambda x: [not i for i in l]\n l = [a, b]\n if all(f(l)):\n print(\"ok\")\n else:\n l = any(f(l))\n \"\"\")))\n\n ivp = IntermediateVariablePreprocessor()\n\n random.seed(42)\n nb = ivp.preprocess(nb)\n print(nb.cells[0].source)\n assert len(nb.cells) == 1\n assert nb.cells[0].source.strip() == dedent(\"\"\"\\\n a = True\n b = False\n f = (lambda x: (not x))\n var_HBRPOI = f(a)\n var_G8F1CB = f(b)\n g = (var_HBRPOI + var_G8F1CB)\n var_FNO6B9 = f(a)\n if (var_FNO6B9):\n var_M80O2R = f(b)\n if (var_FNO6B9 and var_M80O2R):\n var_AK1VRJ = print('hi')\n var_AK1VRJ\n var_NVGFYG = f(a)\n if (not (var_NVGFYG)):\n var_WWQC38 = f(b)\n if (var_NVGFYG or var_WWQC38):\n var_HYF9SX = print('hi')\n var_HYF9SX\n if (a or b):\n var_MECOSF = print('bye')\n var_MECOSF\n l = [f(i) for i in [a, b]]\n f = (lambda x: [(not i) for i in l])\n l = [a, b]\n var_KXWNRE = f(l)\n var_K8PK3Y = all(var_KXWNRE)\n if var_K8PK3Y:\n var_R9OUDO = print('ok')\n var_R9OUDO\n else:\n var_CUZREN = f(l)\n l = any(var_CUZREN)\n \"\"\").strip()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from abc import ABCMeta, abstractmethod
from datetime import datetime
from enum import Enum
from application.response import ResponseError
class ModelBase:
    """Abstract base wrapping an ORM schema row behind model properties.

    Subclasses supply the concrete schema class via _get_cls_schema();
    property-decorated attributes are what to_dict() serializes.
    """
    # NOTE(review): `__metaclass__` is the Python 2 idiom; under Python 3 it
    # has no effect, so @abstractmethod below is not actually enforced.
    __metaclass__ = ABCMeta
    @classmethod
    @abstractmethod
    def _get_cls_schema(cls):
        """Return the ORM schema class this model wraps."""
        pass
    def __new__(cls, schema):
        # Constructing a model from a missing row yields None instead of an
        # instance, so lookup misses propagate as None to callers.
        if schema is None:
            return None
        else:
            return object.__new__(cls)
    def __init__(self, schema):
        self.schema = schema
    @property
    def id(self):
        return self.schema.id
    @property
    def uid(self):
        return self.schema.uid
    def refresh(self):
        # Re-fetch the underlying row by primary key.
        self.schema = self._get_cls_schema().query.get(self.schema.id)
    def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):
        """Collect property-decorated attributes into a dict and return it.

        :param include_keys: list, attributes to return; defaults to all
            names that do not start with an underscore
        :param exclude_keys: list, attributes to exclude; defaults to []
        :param depth: int, recursion depth; nested objects' to_dict() calls
            are limited to two levels (deeper references collapse to uid)
        :param lite: bool, whether to produce the lite version, which also
            honours the object's lite_exclude_keys
        """
        return_dict = {}
        # NOTE(review): __class__.__dict__ only contains attributes defined on
        # the instance's own class — properties inherited from ModelBase
        # (e.g. id/uid) are not included; confirm this is intended.
        attrs = self.__class__.__dict__
        include_keys = include_keys or [
            name for name in attrs.keys() if not name.startswith("_")
        ]
        exclude_keys = exclude_keys or []
        if lite is True:
            lite_exclude_keys = getattr(self, "lite_exclude_keys", [])
            exclude_keys = exclude_keys + lite_exclude_keys
        include_keys = [name for name in include_keys if name not in exclude_keys]
        # Beyond two levels of nesting, represent the object by its uid only.
        if depth > 1:
            return self.uid
        for key, value in attrs.items():
            if key not in include_keys:
                continue
            # Only property-decorated attributes are serialized.
            if not isinstance(value, property):
                continue
            value = getattr(self, key)
            if isinstance(value, Enum):
                # Enums serialize to their underlying value.
                return_dict[key] = value.value
            elif isinstance(value, list):
                # Recurse into list items that expose to_dict().
                list_values = []
                for item in value:
                    if hasattr(item, "to_dict"):
                        list_values.append(item.to_dict(depth=depth + 1, lite=True))
                    else:
                        list_values.append(item)
                return_dict[key] = list_values
            elif isinstance(value, dict):
                # Recurse into dict values that expose to_dict().
                dict_values = {}
                for k, v in value.items():
                    if hasattr(v, "to_dict"):
                        dict_values[k] = v.to_dict(depth=depth + 1, lite=True)
                    else:
                        dict_values[k] = v
                return_dict[key] = dict_values
            elif isinstance(value, datetime):
                # Datetimes serialize as ISO-8601 strings.
                return_dict[key] = value.isoformat()
            elif hasattr(value, "to_dict"):
                return_dict[key] = value.to_dict(depth=depth + 1, lite=True)
            else:
                return_dict[key] = value
        return return_dict
    @classmethod
    def get_by_id(cls, id):
        """Fetch by primary key; raises ResponseError when no row exists."""
        schema = cls._get_cls_schema().query.get(id)
        if schema is None:
            raise ResponseError(info='对应编号信息不存在')
        return cls(schema)
    @classmethod
    def get_by_uid(cls, uid):
        """Fetch by uid; returns None when no row matches (see __new__)."""
        schema = cls._get_cls_schema().query.filter_by(uid=uid).first()
        return cls(schema)
|
normal
|
{
"blob_id": "5917c891d2885f779dc33f189f1a875efbd0c302",
"index": 163,
"step-1": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-2": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n <mask token>\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-3": "<mask token>\n\n\nclass ModelBase:\n <mask token>\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n <mask token>\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = 
value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-4": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False\n ):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n attrs = self.__class__.__dict__\n include_keys = include_keys or [name for name in attrs.keys() if \n not name.startswith('_')]\n exclude_keys = exclude_keys or []\n if lite is True:\n lite_exclude_keys = getattr(self, 'lite_exclude_keys', [])\n exclude_keys = exclude_keys + lite_exclude_keys\n include_keys = [name for name in include_keys if name not in\n exclude_keys]\n if depth > 1:\n return self.uid\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, 'to_dict'):\n list_values.append(item.to_dict(depth=depth + 1,\n lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if hasattr(v, 
'to_dict'):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n elif hasattr(value, 'to_dict'):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-5": "from abc import ABCMeta, abstractmethod\nfrom datetime import datetime\nfrom enum import Enum\n\nfrom application.response import ResponseError\n\n\nclass ModelBase:\n __metaclass__ = ABCMeta\n\n @classmethod\n @abstractmethod\n def _get_cls_schema(cls):\n pass\n\n def __new__(cls, schema):\n if schema is None:\n return None\n else:\n return object.__new__(cls)\n\n def __init__(self, schema):\n self.schema = schema\n\n @property\n def id(self):\n return self.schema.id\n\n @property\n def uid(self):\n return self.schema.uid\n\n def refresh(self):\n self.schema = self._get_cls_schema().query.get(self.schema.id)\n\n def to_dict(self, include_keys=None, exclude_keys=None, depth=0, lite=False):\n \"\"\" 把用 property 装饰的属性封装到一个 dict 中再返回\n :param include_keys, list, 指定需要返回的属性, 默认为全部, 但不包含下划线开始的属性\n :param exclude_keys, list, 指定需要排除的属性, 默认为 []\n :param depth, int, 深度, object 可能含有对其它 object 的引用, object.to_dict() 调用限定两层\n :param lite, boolean, 是否为精简版, 在精简版中还会考虑 object 的 lite_exclude_keys\n \"\"\"\n return_dict = {}\n\n attrs = self.__class__.__dict__\n\n include_keys = include_keys or [\n name for name in attrs.keys() if not name.startswith(\"_\")\n ]\n exclude_keys = exclude_keys or []\n\n if lite is True:\n lite_exclude_keys = getattr(self, \"lite_exclude_keys\", [])\n exclude_keys = exclude_keys + lite_exclude_keys\n\n include_keys = [name for name in include_keys if name not in exclude_keys]\n\n if depth > 1:\n return self.uid\n\n for key, value in attrs.items():\n if key not in include_keys:\n continue\n if not isinstance(value, property):\n continue\n value = getattr(self, key)\n if isinstance(value, Enum):\n return_dict[key] = value.value\n\n elif isinstance(value, list):\n list_values = []\n for item in value:\n if hasattr(item, \"to_dict\"):\n list_values.append(item.to_dict(depth=depth + 1, lite=True))\n else:\n list_values.append(item)\n return_dict[key] = list_values\n\n elif isinstance(value, dict):\n dict_values = {}\n for k, v in value.items():\n if 
hasattr(v, \"to_dict\"):\n dict_values[k] = v.to_dict(depth=depth + 1, lite=True)\n else:\n dict_values[k] = v\n return_dict[key] = dict_values\n\n elif isinstance(value, datetime):\n return_dict[key] = value.isoformat()\n\n elif hasattr(value, \"to_dict\"):\n return_dict[key] = value.to_dict(depth=depth + 1, lite=True)\n\n else:\n return_dict[key] = value\n return return_dict\n\n @classmethod\n def get_by_id(cls, id):\n schema = cls._get_cls_schema().query.get(id)\n if schema is None:\n raise ResponseError(info='对应编号信息不存在')\n return cls(schema)\n\n @classmethod\n def get_by_uid(cls, uid):\n schema = cls._get_cls_schema().query.filter_by(uid=uid).first()\n return cls(schema)\n",
"step-ids": [
6,
8,
9,
12,
13
]
}
|
[
6,
8,
9,
12,
13
] |
<|reserved_special_token_0|>
class BondCover(CoverEntity):
    """Representation of a Bond cover (motorized shade)."""
    def __init__(self, bond: Bond, device: BondDevice):
        """Create HA entity representing Bond cover."""
        self._bond = bond
        self._device = device
    @property
    def device_class(self) ->Optional[str]:
        """Get device class."""
        return DEVICE_CLASS_SHADE
    @property
    def unique_id(self) ->Optional[str]:
        """Get unique ID for the entity."""
        # The Bond device id doubles as the entity's unique id.
        return self._device.device_id
    @property
    def name(self) ->Optional[str]:
        """Get entity name."""
        return self._device.name
    @property
    def device_info(self) ->Optional[Dict[str, Any]]:
        """Get a an HA device representing this cover."""
        return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device
            .device_id)}}
    @property
    def is_closed(self):
        """Return if the cover is closed or not."""
        # Always None: this entity does not track open/closed state.
        return None
    def open_cover(self, **kwargs: Any) ->None:
        """Open the cover."""
        self._bond.open(self._device.device_id)
    def close_cover(self, **kwargs: Any) ->None:
        """Close cover."""
        self._bond.close(self._device.device_id)
    def stop_cover(self, **kwargs):
        """Hold cover."""
        # HA's "stop" action maps onto Bond's "hold" command.
        self._bond.hold(self._device.device_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BondCover(CoverEntity):
    """Home Assistant cover entity backed by a Bond shade device."""

    def __init__(self, bond: Bond, device: BondDevice):
        """Store the Bond API client and the device this entity controls."""
        self._bond = bond
        self._device = device

    @property
    def device_class(self) ->Optional[str]:
        """Classify this entity as a shade."""
        return DEVICE_CLASS_SHADE

    @property
    def unique_id(self) ->Optional[str]:
        """Expose the Bond device id as the entity's unique id."""
        return self._device.device_id

    @property
    def name(self) ->Optional[str]:
        """Expose the Bond device name as the entity name."""
        return self._device.name

    @property
    def device_info(self) ->Optional[Dict[str, Any]]:
        """Build the device-registry description for this cover."""
        info: Dict[str, Any] = {ATTR_NAME: self.name}
        info['identifiers'] = {(DOMAIN, self._device.device_id)}
        return info

    @property
    def is_closed(self):
        """Open/closed state is not tracked; always report unknown (None)."""
        return None

    def open_cover(self, **kwargs: Any) ->None:
        """Send Bond's 'open' command to the device."""
        self._bond.open(self._device.device_id)

    def close_cover(self, **kwargs: Any) ->None:
        """Send Bond's 'close' command to the device."""
        self._bond.close(self._device.device_id)

    def stop_cover(self, **kwargs):
        """Send Bond's 'hold' command to stop the cover's movement."""
        self._bond.hold(self._device.device_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity]], None]) ->None:
    """Set up Bond cover devices.

    Queries the Bond hub in a background task and registers a BondCover
    entity for every motorized-shades device.
    """
    # The Bond API client was stored under this config entry's id.
    bond: Bond = hass.data[DOMAIN][entry.entry_id]
    async def discover():
        # Fetch all hub devices, keep only the motorized shades.
        devices = await get_bond_devices(hass, bond)
        covers = [BondCover(bond, device) for device in devices if device.
            type == BOND_DEVICE_TYPE_MOTORIZED_SHADES]
        async_add_entities(covers)
    # NOTE(review): the task handle is discarded — confirm setup does not
    # need to await discovery completion.
    asyncio.create_task(discover())
class BondCover(CoverEntity):
    """Representation of a Bond cover."""
    def __init__(self, bond: Bond, device: BondDevice):
        """Create HA entity representing Bond cover."""
        self._bond = bond
        self._device = device
    @property
    def device_class(self) ->Optional[str]:
        """Get device class."""
        return DEVICE_CLASS_SHADE
    @property
    def unique_id(self) ->Optional[str]:
        """Get unique ID for the entity."""
        # The Bond device id doubles as the entity's unique id.
        return self._device.device_id
    @property
    def name(self) ->Optional[str]:
        """Get entity name."""
        return self._device.name
    @property
    def device_info(self) ->Optional[Dict[str, Any]]:
        """Get a an HA device representing this cover."""
        return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device
            .device_id)}}
    @property
    def is_closed(self):
        """Return if the cover is closed or not."""
        # Always None: this entity does not track open/closed state.
        return None
    def open_cover(self, **kwargs: Any) ->None:
        """Open the cover."""
        self._bond.open(self._device.device_id)
    def close_cover(self, **kwargs: Any) ->None:
        """Close cover."""
        self._bond.close(self._device.device_id)
    def stop_cover(self, **kwargs):
        """Hold cover."""
        # HA's "stop" action maps onto Bond's "hold" command.
        self._bond.hold(self._device.device_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import asyncio
import logging
from typing import Any, Callable, Dict, List, Optional
from bond import BOND_DEVICE_TYPE_MOTORIZED_SHADES, Bond
from homeassistant.components.cover import DEVICE_CLASS_SHADE, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .utils import BondDevice, get_bond_devices
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity]], None]) -> None:
    """Set up Bond cover devices for one config entry."""
    bond: Bond = hass.data[DOMAIN][entry.entry_id]

    async def discover():
        # Fetch every device from the hub and register the motorized shades.
        found = await get_bond_devices(hass, bond)
        entities = [
            BondCover(bond, device)
            for device in found
            if device.type == BOND_DEVICE_TYPE_MOTORIZED_SHADES
        ]
        async_add_entities(entities)

    # Schedule discovery without blocking setup.
    asyncio.create_task(discover())
class BondCover(CoverEntity):
    """Representation of a Bond-controlled cover (shade)."""

    def __init__(self, bond: Bond, device: BondDevice):
        """Store the Bond client and the device this entity wraps."""
        self._bond = bond
        self._device = device

    @property
    def device_class(self) -> Optional[str]:
        """Report this entity as a shade."""
        return DEVICE_CLASS_SHADE

    @property
    def unique_id(self) -> Optional[str]:
        """Unique ID: the Bond device id."""
        return self._device.device_id

    @property
    def name(self) -> Optional[str]:
        """Entity name from the Bond device."""
        return self._device.name

    @property
    def device_info(self) -> Optional[Dict[str, Any]]:
        """Device registry information for this cover."""
        return {
            ATTR_NAME: self.name,
            'identifiers': {(DOMAIN, self._device.device_id)},
        }

    @property
    def is_closed(self):
        """Position is not reported by Bond; state is unknown."""
        return None

    def open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        self._bond.open(self._device.device_id)

    def close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        self._bond.close(self._device.device_id)

    def stop_cover(self, **kwargs):
        """Hold the cover."""
        self._bond.hold(self._device.device_id)
<|reserved_special_token_1|>
"""Support for Bond covers."""
import asyncio
import logging
from typing import Any, Callable, Dict, List, Optional
from bond import BOND_DEVICE_TYPE_MOTORIZED_SHADES, Bond
from homeassistant.components.cover import DEVICE_CLASS_SHADE, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .utils import BondDevice, get_bond_devices
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity]], None],
) -> None:
    """Set up Bond cover devices."""
    bond: Bond = hass.data[DOMAIN][entry.entry_id]

    async def discover():
        # Only motorized shades are exposed as cover entities.
        devices = await get_bond_devices(hass, bond)
        shades = []
        for device in devices:
            if device.type == BOND_DEVICE_TYPE_MOTORIZED_SHADES:
                shades.append(BondCover(bond, device))
        async_add_entities(shades)

    # Kick off discovery in the background; setup itself does not wait.
    asyncio.create_task(discover())
class BondCover(CoverEntity):
    """Cover entity driven by a Bond bridge."""

    def __init__(self, bond: Bond, device: BondDevice):
        """Create HA entity representing a Bond cover."""
        self._bond = bond
        self._device = device

    @property
    def device_class(self) -> Optional[str]:
        """Classify this entity as a shade."""
        return DEVICE_CLASS_SHADE

    @property
    def unique_id(self) -> Optional[str]:
        """Return the Bond device id as the unique ID."""
        return self._device.device_id

    @property
    def name(self) -> Optional[str]:
        """Return the device's configured name."""
        return self._device.name

    @property
    def device_info(self) -> Optional[Dict[str, Any]]:
        """Return HA device registry info for this cover."""
        ident = (DOMAIN, self._device.device_id)
        return {ATTR_NAME: self.name, "identifiers": {ident}}

    @property
    def is_closed(self):
        """Unknown: Bond does not expose shade position."""
        return None

    def open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        target = self._device.device_id
        self._bond.open(target)

    def close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        target = self._device.device_id
        self._bond.close(target)

    def stop_cover(self, **kwargs):
        """Hold the cover in place."""
        target = self._device.device_id
        self._bond.hold(target)
|
flexible
|
{
"blob_id": "ba9d7b877eda3f7469db58e2ee194b601e3c3e08",
"index": 4227,
"step-1": "<mask token>\n\n\nclass BondCover(CoverEntity):\n <mask token>\n\n def __init__(self, bond: Bond, device: BondDevice):\n \"\"\"Create HA entity representing Bond cover.\"\"\"\n self._bond = bond\n self._device = device\n\n @property\n def device_class(self) ->Optional[str]:\n \"\"\"Get device class.\"\"\"\n return DEVICE_CLASS_SHADE\n\n @property\n def unique_id(self) ->Optional[str]:\n \"\"\"Get unique ID for the entity.\"\"\"\n return self._device.device_id\n\n @property\n def name(self) ->Optional[str]:\n \"\"\"Get entity name.\"\"\"\n return self._device.name\n\n @property\n def device_info(self) ->Optional[Dict[str, Any]]:\n \"\"\"Get a an HA device representing this cover.\"\"\"\n return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device\n .device_id)}}\n\n @property\n def is_closed(self):\n \"\"\"Return if the cover is closed or not.\"\"\"\n return None\n\n def open_cover(self, **kwargs: Any) ->None:\n \"\"\"Open the cover.\"\"\"\n self._bond.open(self._device.device_id)\n\n def close_cover(self, **kwargs: Any) ->None:\n \"\"\"Close cover.\"\"\"\n self._bond.close(self._device.device_id)\n\n def stop_cover(self, **kwargs):\n \"\"\"Hold cover.\"\"\"\n self._bond.hold(self._device.device_id)\n",
"step-2": "<mask token>\n\n\nclass BondCover(CoverEntity):\n \"\"\"Representation of a Bond cover.\"\"\"\n\n def __init__(self, bond: Bond, device: BondDevice):\n \"\"\"Create HA entity representing Bond cover.\"\"\"\n self._bond = bond\n self._device = device\n\n @property\n def device_class(self) ->Optional[str]:\n \"\"\"Get device class.\"\"\"\n return DEVICE_CLASS_SHADE\n\n @property\n def unique_id(self) ->Optional[str]:\n \"\"\"Get unique ID for the entity.\"\"\"\n return self._device.device_id\n\n @property\n def name(self) ->Optional[str]:\n \"\"\"Get entity name.\"\"\"\n return self._device.name\n\n @property\n def device_info(self) ->Optional[Dict[str, Any]]:\n \"\"\"Get a an HA device representing this cover.\"\"\"\n return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device\n .device_id)}}\n\n @property\n def is_closed(self):\n \"\"\"Return if the cover is closed or not.\"\"\"\n return None\n\n def open_cover(self, **kwargs: Any) ->None:\n \"\"\"Open the cover.\"\"\"\n self._bond.open(self._device.device_id)\n\n def close_cover(self, **kwargs: Any) ->None:\n \"\"\"Close cover.\"\"\"\n self._bond.close(self._device.device_id)\n\n def stop_cover(self, **kwargs):\n \"\"\"Hold cover.\"\"\"\n self._bond.hold(self._device.device_id)\n",
"step-3": "<mask token>\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry,\n async_add_entities: Callable[[List[Entity]], None]) ->None:\n \"\"\"Set up Bond cover devices.\"\"\"\n bond: Bond = hass.data[DOMAIN][entry.entry_id]\n\n async def discover():\n devices = await get_bond_devices(hass, bond)\n covers = [BondCover(bond, device) for device in devices if device.\n type == BOND_DEVICE_TYPE_MOTORIZED_SHADES]\n async_add_entities(covers)\n asyncio.create_task(discover())\n\n\nclass BondCover(CoverEntity):\n \"\"\"Representation of a Bond cover.\"\"\"\n\n def __init__(self, bond: Bond, device: BondDevice):\n \"\"\"Create HA entity representing Bond cover.\"\"\"\n self._bond = bond\n self._device = device\n\n @property\n def device_class(self) ->Optional[str]:\n \"\"\"Get device class.\"\"\"\n return DEVICE_CLASS_SHADE\n\n @property\n def unique_id(self) ->Optional[str]:\n \"\"\"Get unique ID for the entity.\"\"\"\n return self._device.device_id\n\n @property\n def name(self) ->Optional[str]:\n \"\"\"Get entity name.\"\"\"\n return self._device.name\n\n @property\n def device_info(self) ->Optional[Dict[str, Any]]:\n \"\"\"Get a an HA device representing this cover.\"\"\"\n return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device\n .device_id)}}\n\n @property\n def is_closed(self):\n \"\"\"Return if the cover is closed or not.\"\"\"\n return None\n\n def open_cover(self, **kwargs: Any) ->None:\n \"\"\"Open the cover.\"\"\"\n self._bond.open(self._device.device_id)\n\n def close_cover(self, **kwargs: Any) ->None:\n \"\"\"Close cover.\"\"\"\n self._bond.close(self._device.device_id)\n\n def stop_cover(self, **kwargs):\n \"\"\"Hold cover.\"\"\"\n self._bond.hold(self._device.device_id)\n",
"step-4": "<mask token>\nimport asyncio\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional\nfrom bond import BOND_DEVICE_TYPE_MOTORIZED_SHADES, Bond\nfrom homeassistant.components.cover import DEVICE_CLASS_SHADE, CoverEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_NAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import Entity\nfrom .const import DOMAIN\nfrom .utils import BondDevice, get_bond_devices\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry,\n async_add_entities: Callable[[List[Entity]], None]) ->None:\n \"\"\"Set up Bond cover devices.\"\"\"\n bond: Bond = hass.data[DOMAIN][entry.entry_id]\n\n async def discover():\n devices = await get_bond_devices(hass, bond)\n covers = [BondCover(bond, device) for device in devices if device.\n type == BOND_DEVICE_TYPE_MOTORIZED_SHADES]\n async_add_entities(covers)\n asyncio.create_task(discover())\n\n\nclass BondCover(CoverEntity):\n \"\"\"Representation of a Bond cover.\"\"\"\n\n def __init__(self, bond: Bond, device: BondDevice):\n \"\"\"Create HA entity representing Bond cover.\"\"\"\n self._bond = bond\n self._device = device\n\n @property\n def device_class(self) ->Optional[str]:\n \"\"\"Get device class.\"\"\"\n return DEVICE_CLASS_SHADE\n\n @property\n def unique_id(self) ->Optional[str]:\n \"\"\"Get unique ID for the entity.\"\"\"\n return self._device.device_id\n\n @property\n def name(self) ->Optional[str]:\n \"\"\"Get entity name.\"\"\"\n return self._device.name\n\n @property\n def device_info(self) ->Optional[Dict[str, Any]]:\n \"\"\"Get a an HA device representing this cover.\"\"\"\n return {ATTR_NAME: self.name, 'identifiers': {(DOMAIN, self._device\n .device_id)}}\n\n @property\n def is_closed(self):\n \"\"\"Return if the cover is closed or not.\"\"\"\n return None\n\n def open_cover(self, **kwargs: Any) ->None:\n \"\"\"Open the 
cover.\"\"\"\n self._bond.open(self._device.device_id)\n\n def close_cover(self, **kwargs: Any) ->None:\n \"\"\"Close cover.\"\"\"\n self._bond.close(self._device.device_id)\n\n def stop_cover(self, **kwargs):\n \"\"\"Hold cover.\"\"\"\n self._bond.hold(self._device.device_id)\n",
"step-5": "\"\"\"Support for Bond covers.\"\"\"\nimport asyncio\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional\n\nfrom bond import BOND_DEVICE_TYPE_MOTORIZED_SHADES, Bond\n\nfrom homeassistant.components.cover import DEVICE_CLASS_SHADE, CoverEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_NAME\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import Entity\n\nfrom .const import DOMAIN\nfrom .utils import BondDevice, get_bond_devices\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: Callable[[List[Entity]], None],\n) -> None:\n \"\"\"Set up Bond cover devices.\"\"\"\n\n bond: Bond = hass.data[DOMAIN][entry.entry_id]\n\n async def discover():\n devices = await get_bond_devices(hass, bond)\n covers = [\n BondCover(bond, device)\n for device in devices\n if device.type == BOND_DEVICE_TYPE_MOTORIZED_SHADES\n ]\n async_add_entities(covers)\n\n asyncio.create_task(discover())\n\n\nclass BondCover(CoverEntity):\n \"\"\"Representation of a Bond cover.\"\"\"\n\n def __init__(self, bond: Bond, device: BondDevice):\n \"\"\"Create HA entity representing Bond cover.\"\"\"\n self._bond = bond\n self._device = device\n\n @property\n def device_class(self) -> Optional[str]:\n \"\"\"Get device class.\"\"\"\n return DEVICE_CLASS_SHADE\n\n @property\n def unique_id(self) -> Optional[str]:\n \"\"\"Get unique ID for the entity.\"\"\"\n return self._device.device_id\n\n @property\n def name(self) -> Optional[str]:\n \"\"\"Get entity name.\"\"\"\n return self._device.name\n\n @property\n def device_info(self) -> Optional[Dict[str, Any]]:\n \"\"\"Get a an HA device representing this cover.\"\"\"\n return {ATTR_NAME: self.name, \"identifiers\": {(DOMAIN, self._device.device_id)}}\n\n @property\n def is_closed(self):\n \"\"\"Return if the cover is closed or not.\"\"\"\n return None\n\n def 
open_cover(self, **kwargs: Any) -> None:\n \"\"\"Open the cover.\"\"\"\n self._bond.open(self._device.device_id)\n\n def close_cover(self, **kwargs: Any) -> None:\n \"\"\"Close cover.\"\"\"\n self._bond.close(self._device.device_id)\n\n def stop_cover(self, **kwargs):\n \"\"\"Hold cover.\"\"\"\n self._bond.hold(self._device.device_id)\n",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
<|reserved_special_token_0|>
class Memory:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DesignTracker:
    def __init__(self, epochs, **kwargs):
        """
        Track the best design (layers, thicknesses, merit) discovered per epoch.

        Args:
            epochs: number of epoch slots to pre-allocate; -1 requests
                append-only mode (lists start empty and grow via
                store(..., append_mode=True)).
            kwargs: extra options; 'output_dir' is read by save_state().
        """
        if epochs == -1:
            # Append-only mode. (Original fell through and relied on
            # [0] * -1 == []; make the branch explicit.)
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            # One slot per epoch; 0 marks "nothing stored yet".
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
        self.kwargs = kwargs
        self.current_e = 0

    def store(self, layers, thicknesses, ret, e, append_mode=False):
        """Record a design; in slot mode keep only the best merit for epoch e."""
        if append_mode:
            self.layer_ls.append(layers)
            self.thick_ls.append(thicknesses)
            self.max_ret_ls.append(ret)
        elif ret >= self.max_ret_ls[e]:
            self.layer_ls[e] = layers
            self.thick_ls[e] = thicknesses
            self.max_ret_ls[e] = ret

    def save_state(self):
        """Pickle this tracker to output_dir, tagged with the MPI rank."""
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        filename = os.path.join(self.kwargs['output_dir'],
            'design_tracker_{}.pkl'.format(rank))
        # BUGFIX: use a context manager; the original leaked the file handle.
        with open(filename, 'wb') as f:
            pkl.dump(self, f)

    def print_progress(self):
        """Return readable per-epoch summaries, stopping at the first empty slot."""
        progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
        read_progress = []
        for entry in progress:
            if entry == (0, 0, 0):
                # Unfilled slot: no later epoch has data either.
                break
            read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for
                l, d in zip(entry[0], entry[1])]) +
                ', Merit {:.3f}'.format(entry[2])])
        return read_progress
<|reserved_special_token_0|>
class TMM_sim:
    def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),
        substrate='Cr', substrate_thick=500):
        """
        Transfer-matrix simulator returning R/T/A spectra for layer stacks.

        Args:
            mats: layer material names (CSV files expected under DATABASE).
            wavelength: wavelength grid, presumably in micrometers (it is
                scaled by 1000 before calling coh_tmm) -- confirm with data.
            substrate: substrate material; 'Glass' and 'Air' are special-cased.
            substrate_thick: substrate thickness in nm.
        """
        # NOTE: the mutable default mats=['Ge'] is only read, never mutated.
        self.mats = mats
        # Substrate nk data is only loaded when it is a real material.
        self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'
            ] else mats
        self.wavelength = wavelength
        self.nk_dict = self.load_materials()
        self.substrate = substrate
        self.substrate_thick = substrate_thick

    def load_materials(self):
        """
        Load material nk and return corresponding interpolators.

        Return:
            nk_dict: dict, key -- material name, value: complex n+ik
            sampled on the self.wavelength grid.
        """
        nk_dict = {}
        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.0j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
            mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],
                kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)
        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        """
        Compute reflection/transmission/absorption spectra via coh_tmm.

        Input:
            materials: list of layer material names
            thickness: thickness list padded with np.inf at both ends.
                NOTE: mutated in place (substrate thickness inserted)
                unless the substrate is Air.
            theta: degree, the incidence angle
        Return:
            (R, T, A): arrays over self.wavelength
        """
        degree = pi / 180
        if self.substrate != 'Air':
            # Insert the substrate layer just before the trailing inf.
            thickness.insert(-1, self.substrate_thick)
        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1000.0):
            if self.substrate == 'Glass':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1
                    ]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    self.nk_dict[self.substrate][i], 1]
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])
        R, T = np.array(R), np.array(T)
        A = 1 - R - T
        if plot:
            self.plot_spectrum(R, T, A)
        if title:
            thick = thickness[1:-1]
            title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(
                thick, materials)])
            # BUGFIX: original used `is not 'Air'` (identity comparison with
            # a string literal, a SyntaxWarning on CPython >= 3.8).
            if self.substrate != 'Air':
                title = 'Air | ' + title + ' | {}nm {} '.format(self.
                    substrate_thick, self.substrate) + '| Air'
            else:
                title = 'Air | ' + title + ' | Air'
            plt.title(title, **{'size': '10'})
        return R, T, A

    def plot_spectrum(self, R, T, A):
        """Plot R/T/A against wavelength (nm), legend shows the averages."""
        plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self
            .wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),
            'T: Average = {:.2f}%'.format(np.mean(T) * 100),
            'A: Average = {:.2f}%'.format(np.mean(A) * 100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Memory:
    """Rollout buffer: parallel lists describing one batch of transitions."""

    def __init__(self):
        # Index i in every list describes the same transition.
        self.actions = []
        self.states = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []

    def clear_memory(self):
        """Empty all buffers in place (list identities are preserved)."""
        self.actions.clear()
        self.states.clear()
        self.logprobs.clear()
        self.rewards.clear()
        self.is_terminals.clear()
<|reserved_special_token_0|>
def merge_layers(categories, thicknesses):
    """
    Collapse consecutive layers of the same material into a single layer.

    `thicknesses` carries inf padding at both ends; the returned thickness
    list is re-padded with inf.
    """
    inner = thicknesses[1:-1]
    merged_cats = [categories[0]]
    merged_thick = [inner[0]]
    for cat, depth in zip(categories[1:], inner[1:]):
        if cat == merged_cats[-1]:
            # Same material as the previous layer: accumulate thickness.
            merged_thick[-1] += depth
        else:
            merged_cats.append(cat)
            merged_thick.append(depth)
    merged_thick = [np.inf] + merged_thick + [np.inf]
    return merged_cats, merged_thick
<|reserved_special_token_0|>
class DesignTracker:
    def __init__(self, epochs, **kwargs):
        """
        Track the best design (layers, thicknesses, merit) per epoch.

        Args:
            epochs: number of epoch slots; -1 requests append-only mode.
            kwargs: extra options; 'output_dir' is used by save_state().
        """
        if epochs == -1:
            # Append-only mode: start empty. (The original relied on the
            # accident that [0] * -1 == []; the else makes intent explicit.)
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            # Slot per epoch; 0 means "nothing stored yet".
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
        self.kwargs = kwargs
        self.current_e = 0

    def store(self, layers, thicknesses, ret, e, append_mode=False):
        """Record a design; slot mode keeps only the best merit for epoch e."""
        if append_mode:
            self.layer_ls.append(layers)
            self.thick_ls.append(thicknesses)
            self.max_ret_ls.append(ret)
        elif ret >= self.max_ret_ls[e]:
            self.layer_ls[e] = layers
            self.thick_ls[e] = thicknesses
            self.max_ret_ls[e] = ret

    def save_state(self):
        """Pickle this tracker to output_dir, one file per MPI rank."""
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        filename = os.path.join(self.kwargs['output_dir'],
            'design_tracker_{}.pkl'.format(rank))
        # BUGFIX: context manager closes the handle (original leaked it).
        with open(filename, 'wb') as f:
            pkl.dump(self, f)

    def print_progress(self):
        """Human-readable per-epoch summaries; stops at the first empty slot."""
        progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
        read_progress = []
        for entry in progress:
            if entry == (0, 0, 0):
                break
            read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for
                l, d in zip(entry[0], entry[1])]) +
                ', Merit {:.3f}'.format(entry[2])])
        return read_progress
<|reserved_special_token_0|>
class TMM_sim:
    def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),
        substrate='Cr', substrate_thick=500):
        """
        Transfer-matrix simulator returning R/T/A spectra for layer stacks.

        Args:
            mats: layer material names (CSV files expected under DATABASE).
            wavelength: wavelength grid, presumably micrometers (scaled by
                1000 before calling coh_tmm) -- confirm against the data.
            substrate: substrate material; 'Glass' and 'Air' special-cased.
            substrate_thick: substrate thickness in nm.
        """
        # NOTE: mats default is mutable but only read, never mutated.
        self.mats = mats
        # Substrate nk data only needed for real materials.
        self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'
            ] else mats
        self.wavelength = wavelength
        self.nk_dict = self.load_materials()
        self.substrate = substrate
        self.substrate_thick = substrate_thick

    def load_materials(self):
        """
        Load material nk and return corresponding interpolators.

        Return:
            nk_dict: dict, key -- material name, value: complex n+ik
            sampled on the self.wavelength grid.
        """
        nk_dict = {}
        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.0j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
            mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],
                kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)
        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        """
        Compute R/T/A spectra via coh_tmm.

        Input:
            materials: list of layer material names
            thickness: thickness list padded with np.inf at both ends.
                NOTE: mutated in place (substrate inserted) unless the
                substrate is Air.
            theta: degree, the incidence angle
        Return:
            (R, T, A): arrays over self.wavelength
        """
        degree = pi / 180
        if self.substrate != 'Air':
            # Insert substrate layer just before the trailing inf.
            thickness.insert(-1, self.substrate_thick)
        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1000.0):
            if self.substrate == 'Glass':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1
                    ]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    self.nk_dict[self.substrate][i], 1]
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])
        R, T = np.array(R), np.array(T)
        A = 1 - R - T
        if plot:
            self.plot_spectrum(R, T, A)
        if title:
            thick = thickness[1:-1]
            title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(
                thick, materials)])
            # BUGFIX: `is not 'Air'` was an identity comparison with a
            # string literal (SyntaxWarning on CPython >= 3.8).
            if self.substrate != 'Air':
                title = 'Air | ' + title + ' | {}nm {} '.format(self.
                    substrate_thick, self.substrate) + '| Air'
            else:
                title = 'Air | ' + title + ' | Air'
            plt.title(title, **{'size': '10'})
        return R, T, A

    def plot_spectrum(self, R, T, A):
        """Plot R/T/A against wavelength (nm); legend shows averages."""
        plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self
            .wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),
            'T: Average = {:.2f}%'.format(np.mean(T) * 100),
            'A: Average = {:.2f}%'.format(np.mean(A) * 100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])
<|reserved_special_token_0|>
def combine_tracker(folder):
    """
    Merge all per-rank design trackers in `folder` into one tracker.

    If a cached merged tracker already exists it is returned directly.
    Otherwise, for each epoch the design with the highest merit across all
    ranks is taken; the merged result is cached once every epoch is filled.
    """
    trackers = []
    merged_path = os.path.join(folder, 'design_tracker_merged.pkl')
    if 'design_tracker_merged.pkl' in os.listdir(folder):
        with open(merged_path, 'rb') as f:
            return pkl.load(f)
    for file in os.listdir(folder):
        if file.startswith('design_tracker_'):
            with open(os.path.join(folder, file), 'rb') as f:
                trackers.append(pkl.load(f))
    combined_tracker = DesignTracker(len(trackers[0].layer_ls))
    # For each epoch, index of the rank that achieved the highest merit.
    max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in
        trackers]), axis=0)
    for e in range(len(trackers[0].layer_ls)):
        combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
        combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
        combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
    if combined_tracker.layer_ls[-1] != 0:
        # All epochs filled: cache the merged tracker.
        # BUGFIX: original did os.path.join(folder, tracker_file) where
        # tracker_file already contained the folder, double-joining the
        # path; write directly to the merged path. Also close the handle.
        with open(merged_path, 'wb') as f:
            pkl.dump(combined_tracker, f)
    return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
    """Plot training curves for several experiments and report best designs."""
    root = '../spinningup/data/'
    all_progress = []
    legend_max = []
    matplotlib.rc('font', **{'size': 14})
    fig, ax = plt.subplots(2, 1, figsize=(10, 8))
    for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
        run_dir = os.path.join(root, exp, exp + '_s{}'.format(seed))
        df = visualize_progress(os.path.join(run_dir, 'progress.txt'), x=x,
            ax=ax, color=c, alpha=a)
        tracker = combine_tracker(run_dir)
        progress = tracker.print_progress()
        best = progress[np.argmax(tracker.max_ret_ls)]
        print('{}, Best discovered so far {}'.format(exp, best))
        all_progress.append(progress)
        legend_max.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
    ax[0].legend(legend_max)
    ax[1].legend(exp_ls)
    plt.show()
    return all_progress
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
<|reserved_special_token_0|>
def merge_layers(categories, thicknesses):
    """
    Merges consecutive layers with the same material types.

    The inf padding on `thicknesses` is stripped, runs of equal categories
    are accumulated, and the result is re-padded with inf.
    """
    core = thicknesses[1:-1]
    out_cats = [categories[0]]
    out_thick = [core[0]]
    for material, depth in zip(categories[1:], core[1:]):
        if material != out_cats[-1]:
            out_cats.append(material)
            out_thick.append(depth)
        else:
            # Extend the current run of the same material.
            out_thick[-1] += depth
    out_thick.insert(0, np.inf)
    out_thick.append(np.inf)
    return out_cats, out_thick
def get_structure(categories, values, materials, ds, continuous=False,
    max_value=400):
    """
    Given categories and values, return the structure in the form
    (names (list of str), thickness (list, nm)).

    Args:
        categories: material indices into `materials`.
        values: discrete thickness indices into `ds`, or normalized floats
            when `continuous` is True.
        materials: material name lookup table.
        ds: discrete thickness lookup table (nm).
        continuous: interpret `values` as continuous thickness fractions.
        max_value: thickness cap (nm) in continuous mode.

    Returns:
        (names, thickness): thickness is padded with np.inf on both ends
        (semi-infinite ambient layers).
    """
    # (Removed a dead inner `def threshold(value)` whose body was only a
    # docstring and which was never called.)
    names = [materials[item] for item in categories]
    if not continuous:
        thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
    else:
        thickness = []
        for category, value in zip(categories, values):
            name = materials[category]
            # Per-material minimum thickness before capping at max_value;
            # presumably fabrication lower bounds -- TODO confirm.
            if name == 'Ag':
                thickness.append(min(max(15, int(value * max_value // 2)),
                    max_value))
            elif name in METALS:
                thickness.append(min(max(5, int(value * max_value // 2)),
                    max_value))
            elif name in INSULATORS:
                thickness.append(min(max(1, int(value * max_value // 2)),
                    max_value))
            else:
                raise ValueError('Material not known')
        thickness = [np.inf] + thickness + [np.inf]
    return names, thickness
class DesignTracker:
    def __init__(self, epochs, **kwargs):
        """
        This class tracks the best designs discovered, one slot per epoch.

        Args:
            epochs: number of epoch slots; -1 means append-only mode.
            kwargs: extra options; 'output_dir' is read by save_state().
        """
        if epochs == -1:
            # Append-only mode. The original version fell through and
            # depended on [0] * -1 == []; this else keeps behavior but
            # makes the intent explicit.
            self.layer_ls = []
            self.thick_ls = []
            self.max_ret_ls = []
        else:
            self.layer_ls = [0] * epochs
            self.thick_ls = [0] * epochs
            self.max_ret_ls = [0] * epochs
        self.kwargs = kwargs
        self.current_e = 0

    def store(self, layers, thicknesses, ret, e, append_mode=False):
        """Store a design; in slot mode only if it beats epoch e's best."""
        if append_mode:
            self.layer_ls.append(layers)
            self.thick_ls.append(thicknesses)
            self.max_ret_ls.append(ret)
        elif ret >= self.max_ret_ls[e]:
            self.layer_ls[e] = layers
            self.thick_ls[e] = thicknesses
            self.max_ret_ls[e] = ret

    def save_state(self):
        """Pickle this tracker to output_dir, tagged with the MPI rank."""
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        filename = os.path.join(self.kwargs['output_dir'],
            'design_tracker_{}.pkl'.format(rank))
        # BUGFIX: close the file handle via a context manager.
        with open(filename, 'wb') as f:
            pkl.dump(self, f)

    def print_progress(self):
        """Readable summaries per epoch; stops at the first empty slot."""
        progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
        read_progress = []
        for entry in progress:
            if entry == (0, 0, 0):
                break
            read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for
                l, d in zip(entry[0], entry[1])]) +
                ', Merit {:.3f}'.format(entry[2])])
        return read_progress
def print_progress(progress):
    """
    Render tracker progress entries into readable form, in place.

    Each entry [layers, thicknesses, merit] becomes
    ['mat1 d1 nm|mat2 d2 nm|...', merit]. Returns the mutated list.
    """
    for i in range(len(progress)):
        # (Removed a leftover debug statement `print(progress[i], 0)` that
        # dumped each raw entry plus a meaningless literal 0 to stdout.)
        progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip
            (progress[i][0], progress[i][1])]), progress[i][2]]
    return progress
class TMM_sim:
    def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),
        substrate='Cr', substrate_thick=500):
        """
        This class returns the spectrum given the designed structures.

        Args:
            mats: layer material names (CSV data expected under DATABASE).
            wavelength: wavelength grid, presumably micrometers (scaled by
                1000 before coh_tmm) -- confirm against the nk data.
            substrate: substrate material; 'Glass'/'Air' are special-cased.
            substrate_thick: substrate thickness in nm.
        """
        # mats default is mutable but only read here, never mutated.
        self.mats = mats
        self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'
            ] else mats
        self.wavelength = wavelength
        self.nk_dict = self.load_materials()
        self.substrate = substrate
        self.substrate_thick = substrate_thick

    def load_materials(self):
        """
        Load material nk and return corresponding interpolators.

        Return:
            nk_dict: dict, key -- material name, value: complex n+ik
            evaluated on self.wavelength.
        """
        nk_dict = {}
        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.0j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
            mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],
                kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)
        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        """
        Input:
            materials: list
            thickness: list padded with np.inf at both ends; NOTE: mutated
                in place (substrate inserted) unless the substrate is Air.
            theta: degree, the incidence angle
        Return:
            (R, T, A): spectra arrays over self.wavelength
        """
        degree = pi / 180
        if self.substrate != 'Air':
            thickness.insert(-1, self.substrate_thick)
        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1000.0):
            if self.substrate == 'Glass':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1
                    ]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
                    self.nk_dict[self.substrate][i], 1]
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])
        R, T = np.array(R), np.array(T)
        A = 1 - R - T
        if plot:
            self.plot_spectrum(R, T, A)
        if title:
            thick = thickness[1:-1]
            title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(
                thick, materials)])
            # BUGFIX: replaced `is not 'Air'` (string identity comparison,
            # SyntaxWarning on CPython >= 3.8) with inequality.
            if self.substrate != 'Air':
                title = 'Air | ' + title + ' | {}nm {} '.format(self.
                    substrate_thick, self.substrate) + '| Air'
            else:
                title = 'Air | ' + title + ' | Air'
            plt.title(title, **{'size': '10'})
        return R, T, A

    def plot_spectrum(self, R, T, A):
        """Plot R/T/A vs wavelength in nm; legend reports the averages."""
        plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self
            .wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),
            'T: Average = {:.2f}%'.format(np.mean(T) * 100),
            'A: Average = {:.2f}%'.format(np.mean(A) * 100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])
<|reserved_special_token_0|>
def combine_tracker(folder):
    """
    Merge all per-rank design trackers found in `folder`.

    Returns the cached merged tracker when present; otherwise picks, per
    epoch, the best design across ranks and caches the merged tracker once
    every epoch slot is filled.
    """
    trackers = []
    merged_path = os.path.join(folder, 'design_tracker_merged.pkl')
    if 'design_tracker_merged.pkl' in os.listdir(folder):
        with open(merged_path, 'rb') as f:
            return pkl.load(f)
    for file in os.listdir(folder):
        if file.startswith('design_tracker_'):
            with open(os.path.join(folder, file), 'rb') as f:
                trackers.append(pkl.load(f))
    combined_tracker = DesignTracker(len(trackers[0].layer_ls))
    # Rank achieving the highest merit, per epoch.
    max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in
        trackers]), axis=0)
    for e in range(len(trackers[0].layer_ls)):
        combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
        combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
        combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
    if combined_tracker.layer_ls[-1] != 0:
        # BUGFIX: the original passed an already-joined path back through
        # os.path.join(folder, ...), producing folder/folder/...; write to
        # the merged path directly, and close the handle.
        with open(merged_path, 'wb') as f:
            pkl.dump(combined_tracker, f)
    return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
    """Plot progress curves per experiment and print each best design."""
    root = '../spinningup/data/'
    collected = []
    merit_labels = []
    matplotlib.rc('font', **{'size': 14})
    fig, ax = plt.subplots(2, 1, figsize=(10, 8))
    for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
        exp_dir = os.path.join(root, exp, exp + '_s{}'.format(seed))
        df = visualize_progress(os.path.join(exp_dir, 'progress.txt'),
            x=x, ax=ax, color=c, alpha=a)
        tracker = combine_tracker(exp_dir)
        progress = tracker.print_progress()
        print('{}, Best discovered so far {}'.format(
            exp, progress[np.argmax(tracker.max_ret_ls)]))
        collected.append(progress)
        merit_labels.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
    ax[0].legend(merit_labels)
    ax[1].legend(exp_ls)
    plt.show()
    return collected
def load_exp_res(folder):
    """
    Load every run under `folder` into one concatenated DataFrame.

    Each row combines a run's progress.txt metrics with its flattened
    config.json hyperparameters (repeated per epoch) and the run's best
    discovered design. Layout assumed: folder/<experiment>/<run>/.
    """
    subfolders = [item for item in glob.glob(folder + '/*')]

    def read_hyper(file_name, rep=10):
        # Flatten config.json into columns, repeating each value `rep`
        # times so it aligns row-for-row with the progress DataFrame.
        # Logger settings are skipped; nested dicts become key_subkey
        # columns, and list values keep only their first element.
        with open(os.path.join(file_name, 'config.json')) as f:
            hypers = json.load(f)
        hypers_dict = {}
        for k, v in hypers.items():
            if k.startswith('logger'):
                continue
            elif isinstance(v, dict):
                for kk, vv in v.items():
                    if isinstance(vv, list):
                        hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep
                    else:
                        hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep
            else:
                hypers_dict[k] = [v] * rep
        hyper_df = pd.DataFrame(hypers_dict)
        return hyper_df
    first = True
    for subfolder in tqdm(subfolders):
        runs = glob.glob(subfolder + '/*')
        # NOTE(review): num_epochs is computed but never used below.
        num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),
            sep='\t'))
        for run in runs:
            tracker = combine_tracker(run)
            progress = tracker.print_progress()
            best_design = progress[np.argmax(tracker.max_ret_ls)]
            if first:
                # First run seeds the combined DataFrame.
                df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\t')
                hyper_df = read_hyper(run, rep=len(df))
                best_designs_df = pd.DataFrame([{'best_design': best_design
                    }] * len(df))
                df = pd.concat([df, hyper_df, best_designs_df], axis=1)
                first = False
            else:
                # Subsequent runs: build the same wide row block, then
                # append it below the accumulated frame.
                df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\t')
                hyper_df = read_hyper(run, rep=len(df_))
                best_designs_df = pd.DataFrame([{'best_design': best_design
                    }] * len(df_))
                df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
                df = pd.concat([df, df_], axis=0)
    return df
def finetune(simulator, m0, x0, target, display=False, bounds=None):
"""
Finetune the structure using quasi-Newton's method.
Args:
m0: materials list given by the upstream RL
x0: thicknesses given by the upstream RL
display: if true, then plot the spectrum before and after the finetuning.
Returns:
x_opt: finetuned thickness list
"""
def objective_func(x):
R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])
return 1 - cal_reward(R, T, A, target)
if bounds is None:
bounds = [(15, 200)] * len(x0)
res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})
x_opt = [int(item) for item in res.x]
if display:
plt.figure()
simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)
plt.figure()
simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,
plot=True)
return x_opt, res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Memory:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def clear_memory(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
def batch_spectrum(env, names_list, thickness_list):
def spectrum(args):
"""
Inputs:
1. names: list of lists, each list correspond to the structures
2. thickness: list of lists
"""
names, thickness = args
R, T, A = env.spectrum(names, thickness, 0, False)
return R, T, A
res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args) for args in
zip(names_list, thickness_list))
res = np.array(res)
Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]
return Rs, Ts, As
def merge_layers(categories, thicknesses):
"""
Merges consecutive layers with the same material types.
"""
thicknesses = thicknesses[1:-1]
c_output = [categories[0]]
t_output = [thicknesses[0]]
for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):
if c == c_output[-1]:
t_output[-1] += d
continue
else:
c_output.append(c)
t_output.append(d)
t_output.insert(0, np.inf)
t_output.insert(len(t_output), np.inf)
return c_output, t_output
def get_structure(categories, values, materials, ds, continuous=False,
max_value=400):
"""
Given categories and values, return the strucure in the form
(name (str), thickness (nm))
"""
def threshold(value):
"""
"""
names = [materials[item] for item in categories]
if not continuous:
thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
else:
thickness = []
for category, value in zip(categories, values):
name = materials[category]
if name == 'Ag':
thickness.append(min(max(15, int(value * max_value // 2)),
max_value))
elif name in METALS:
thickness.append(min(max(5, int(value * max_value // 2)),
max_value))
elif name in INSULATORS:
thickness.append(min(max(1, int(value * max_value // 2)),
max_value))
else:
raise ValueError('Material not known')
thickness = [np.inf] + thickness + [np.inf]
return names, thickness
class DesignTracker:
def __init__(self, epochs, **kwargs):
"""
This class tracks the best designs discovered.
"""
if epochs == -1:
self.layer_ls = []
self.thick_ls = []
self.max_ret_ls = []
self.layer_ls = [0] * epochs
self.thick_ls = [0] * epochs
self.max_ret_ls = [0] * epochs
self.kwargs = kwargs
self.current_e = 0
def store(self, layers, thicknesses, ret, e, append_mode=False):
if append_mode:
self.layer_ls.append(layers)
self.thick_ls.append(thicknesses)
self.max_ret_ls.append(ret)
elif ret >= self.max_ret_ls[e]:
self.layer_ls[e] = layers
self.thick_ls[e] = thicknesses
self.max_ret_ls[e] = ret
def save_state(self):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
filename = os.path.join(self.kwargs['output_dir'],
'design_tracker_{}.pkl'.format(rank))
pkl.dump(self, open(filename, 'wb'))
def print_progress(self):
progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
read_progress = []
for i in range(len(progress)):
if progress[i] == (0, 0, 0):
break
read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for
l, d in zip(progress[i][0], progress[i][1])]) +
', Merit {:.3f}'.format(progress[i][2])])
return read_progress
def print_progress(progress):
for i in range(len(progress)):
print(progress[i], 0)
progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip
(progress[i][0], progress[i][1])]), progress[i][2]]
return progress
class TMM_sim:
def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),
substrate='Cr', substrate_thick=500):
"""
This class returns the spectrum given the designed structures.
"""
self.mats = mats
self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'
] else mats
self.wavelength = wavelength
self.nk_dict = self.load_materials()
self.substrate = substrate
self.substrate_thick = substrate_thick
def load_materials(self):
"""
Load material nk and return corresponding interpolators.
Return:
nk_dict: dict, key -- material name, value: n, k in the
self.wavelength range
"""
nk_dict = {}
for mat in self.all_mats:
nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
nk.dropna(inplace=True)
wl = nk['wl'].to_numpy()
index = (nk['n'] + nk['k'] * 1.0j).to_numpy()
mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],
kind='quadratic')
nk_dict[mat] = mat_nk_fn(self.wavelength)
return nk_dict
def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
"""
Input:
materials: list
thickness: list
theta: degree, the incidence angle
Return:
s: array, spectrum
"""
degree = pi / 180
if self.substrate != 'Air':
thickness.insert(-1, self.substrate_thick)
R, T, A = [], [], []
for i, lambda_vac in enumerate(self.wavelength * 1000.0):
if self.substrate == 'Glass':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
1.45, 1]
elif self.substrate == 'Air':
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1
]
else:
n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [
self.nk_dict[self.substrate][i], 1]
res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
R.append(res['R'])
T.append(res['T'])
R, T = np.array(R), np.array(T)
A = 1 - R - T
if plot:
self.plot_spectrum(R, T, A)
if title:
thick = thickness[1:-1]
title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(
thick, materials)])
if self.substrate is not 'Air':
title = 'Air | ' + title + ' | {}nm {} '.format(self.
substrate_thick, self.substrate) + '| Air'
else:
title = 'Air | ' + title + ' | Air'
plt.title(title, **{'size': '10'})
return R, T, A
def plot_spectrum(self, R, T, A):
plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self
.wavelength * 1000, A, linewidth=3)
plt.ylabel('R/T/A')
plt.xlabel('Wavelength (nm)')
plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),
'T: Average = {:.2f}%'.format(np.mean(T) * 100),
'A: Average = {:.2f}%'.format(np.mean(A) * 100)])
plt.grid('on', linestyle='--')
plt.ylim([0, 1])
def visualize_progress(file, x, ax=None, color='b', alpha=1):
df = pd.read_csv(file, sep='\t')
width = 0.5
if ax is None:
fig, ax = plt.subplots(2, 1)
sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha
)
sns.lineplot(x=x, y='AverageEpRet', data=df, ax=ax[1], color=color,
alpha=alpha)
plt.fill_between(df[x], df['AverageEpRet'] - width / 2 * df['StdEpRet'],
df['AverageEpRet'] + width / 2 * df['StdEpRet'], alpha=0.3, color=color
)
return df
def combine_tracker(folder):
"""
Merge all buffers
"""
trackers = []
if 'design_tracker_merged.pkl' in os.listdir(folder):
tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
combined_tracker = pkl.load(open(tracker_file, 'rb'))
return combined_tracker
for file in os.listdir(folder):
if file.startswith('design_tracker_'):
tracker_file = os.path.join(folder, file)
trackers.append(pkl.load(open(tracker_file, 'rb')))
combined_tracker = DesignTracker(len(trackers[0].layer_ls))
max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in
trackers]), axis=0)
for e in range(len(trackers[0].layer_ls)):
combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]
if combined_tracker.layer_ls[-1] != 0:
tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')
pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),
'wb'))
return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
root = '../spinningup/data/'
progress_ls = []
max_ret_ls = []
params = {'size': 14}
matplotlib.rc('font', **params)
fig, ax = plt.subplots(2, 1, figsize=(10, 8))
for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
folder = os.path.join(root, exp, exp + '_s{}'.format(seed))
progress_file = os.path.join(folder, 'progress.txt')
df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)
tracker = combine_tracker(folder)
progress = tracker.print_progress()
print('{}, Best discovered so far {}'.format(exp, progress[np.
argmax(tracker.max_ret_ls)]))
progress_ls.append(progress)
max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
ax[0].legend(max_ret_ls)
ax[1].legend(exp_ls)
plt.show()
return progress_ls
def load_exp_res(folder):
subfolders = [item for item in glob.glob(folder + '/*')]
def read_hyper(file_name, rep=10):
with open(os.path.join(file_name, 'config.json')) as f:
hypers = json.load(f)
hypers_dict = {}
for k, v in hypers.items():
if k.startswith('logger'):
continue
elif isinstance(v, dict):
for kk, vv in v.items():
if isinstance(vv, list):
hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep
else:
hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep
else:
hypers_dict[k] = [v] * rep
hyper_df = pd.DataFrame(hypers_dict)
return hyper_df
first = True
for subfolder in tqdm(subfolders):
runs = glob.glob(subfolder + '/*')
num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),
sep='\t'))
for run in runs:
tracker = combine_tracker(run)
progress = tracker.print_progress()
best_design = progress[np.argmax(tracker.max_ret_ls)]
if first:
df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\t')
hyper_df = read_hyper(run, rep=len(df))
best_designs_df = pd.DataFrame([{'best_design': best_design
}] * len(df))
df = pd.concat([df, hyper_df, best_designs_df], axis=1)
first = False
else:
df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\t')
hyper_df = read_hyper(run, rep=len(df_))
best_designs_df = pd.DataFrame([{'best_design': best_design
}] * len(df_))
df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)
df = pd.concat([df, df_], axis=0)
return df
def finetune(simulator, m0, x0, target, display=False, bounds=None):
"""
Finetune the structure using quasi-Newton's method.
Args:
m0: materials list given by the upstream RL
x0: thicknesses given by the upstream RL
display: if true, then plot the spectrum before and after the finetuning.
Returns:
x_opt: finetuned thickness list
"""
def objective_func(x):
R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])
return 1 - cal_reward(R, T, A, target)
if bounds is None:
bounds = [(15, 200)] * len(x0)
res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})
x_opt = [int(item) for item in res.x]
if display:
plt.figure()
simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)
plt.figure()
simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,
plot=True)
return x_opt, res
<|reserved_special_token_1|>
from mpi4py import MPI
import matplotlib
from tmm import coh_tmm
import pandas as pd
import os
from numpy import pi
from scipy.interpolate import interp1d
from joblib import Parallel, delayed
import numpy as np
import glob
import matplotlib.pyplot as plt
import pickle as pkl
import seaborn as sns
from scipy.optimize import minimize
import json
from tqdm import tqdm
# Directory holding the per-material nk CSV files read by TMM_sim.
DATABASE = './data'
# Material groups used by get_structure() to pick per-material minimum
# thicknesses. Membership tests only, so each name appears exactly once
# ('MgF2' was listed twice in the original).
INSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']
METALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']
# Number of joblib workers used by batch_spectrum().
num_workers = 8
def cal_reward(R, T, A, target):
    """Score spectra against a target as the mean of (1 - MAE) per channel.

    Args:
        R, T, A: numpy arrays — reflection, transmission and absorption
            spectra, respectively.
        target: dict mapping channel name ('R', 'T', anything else -> A)
            to the desired numpy-array spectrum.

    Returns:
        float reward averaged over the requested channels.
    """
    spectra = {'R': R, 'T': T}
    total = 0.0
    for channel, goal in target.items():
        # any key other than 'R'/'T' is scored against absorption
        measured = spectra.get(channel, A)
        total += 1 - np.abs(measured.squeeze() - goal).mean()
    return total / len(target)
class Memory:
    """Rollout buffer holding one batch of PPO-style trajectory data."""

    def __init__(self):
        self.actions = []       # actions taken
        self.states = []        # observed states
        self.logprobs = []      # log-probabilities of the actions
        self.rewards = []       # per-step rewards
        self.is_terminals = []  # episode-termination flags

    def clear_memory(self):
        """Empty every buffer in place, keeping the list objects alive."""
        for buf in (self.actions, self.states, self.logprobs,
                    self.rewards, self.is_terminals):
            del buf[:]
def batch_spectrum(env, names_list, thickness_list):
    """Evaluate the spectra of a batch of designs in parallel.

    Args:
        env: simulator exposing spectrum(names, thickness, theta, plot).
        names_list: list of material-name lists, one per structure.
        thickness_list: list of thickness lists, one per structure.

    Returns:
        (Rs, Ts, As): arrays of shape (batch, n_wavelengths).
    """
    def _evaluate(pair):
        mats, thicks = pair
        return env.spectrum(mats, thicks, 0, False)

    outputs = Parallel(n_jobs=num_workers)(
        delayed(_evaluate)(pair)
        for pair in zip(names_list, thickness_list))
    outputs = np.array(outputs)
    # axis 1 indexes the (R, T, A) triple returned by env.spectrum
    return outputs[:, 0, :], outputs[:, 1, :], outputs[:, 2, :]
def merge_layers(categories, thicknesses):
    """Collapse consecutive layers of the same material into one.

    Args:
        categories: per-layer material labels.
        thicknesses: per-layer thicknesses with np.inf ambient sentinels
            at both ends.

    Returns:
        (merged_categories, merged_thicknesses) with runs of identical
        materials summed and the inf sentinels restored.
    """
    inner = thicknesses[1:-1]
    merged_cats = [categories[0]]
    merged_thicks = [inner[0]]
    for cat, thick in zip(categories[1:], inner[1:]):
        if cat == merged_cats[-1]:
            # same material as the previous layer: accumulate thickness
            merged_thicks[-1] += thick
        else:
            merged_cats.append(cat)
            merged_thicks.append(thick)
    merged_thicks.insert(0, np.inf)
    merged_thicks.append(np.inf)
    return merged_cats, merged_thicks
def get_structure(categories, values, materials, ds, continuous=False,
                  max_value=400):
    """Decode (category, value) action pairs into a physical structure.

    Args:
        categories: indices into *materials*, one per layer.
        values: discrete indices into *ds*, or continuous values when
            continuous=True.
        materials: material names addressed by *categories*.
        ds: discrete thickness table (nm), used when continuous=False.
        continuous: if True, map each value to a clamped thickness in nm.
        max_value: upper thickness clamp for the continuous mapping.

    Returns:
        (names, thickness): material names and thicknesses in nm, with
        np.inf ambient sentinels at both ends.

    Raises:
        ValueError: if a material is neither 'Ag', a metal, nor an
            insulator (continuous mode only).
    """
    # NOTE: the original contained a dead nested `threshold` stub with an
    # empty body; it was never called and has been removed.
    names = [materials[item] for item in categories]
    if not continuous:
        thickness = [np.inf] + [ds[item] for item in values] + [np.inf]
    else:
        thickness = []
        for category, value in zip(categories, values):
            name = materials[category]
            # Per-material minimum thickness (nm); the value is scaled by
            # max_value // 2 and clamped to [minimum, max_value].
            if name == 'Ag':
                minimum = 15
            elif name in METALS:
                minimum = 5
            elif name in INSULATORS:
                minimum = 1
            else:
                raise ValueError('Material not known')
            thickness.append(
                min(max(minimum, int(value * max_value // 2)), max_value))
        thickness = [np.inf] + thickness + [np.inf]
    return names, thickness
class DesignTracker():
    """Tracks the best design discovered at each training epoch."""

    def __init__(self, epochs, **kwargs):
        """
        Args:
            epochs: number of epoch slots to pre-allocate. Any
                non-positive value (the original API used -1) yields
                empty lists, for use with store(..., append_mode=True).
            kwargs: expects 'output_dir' when save_state() is called.
        """
        # [0] * epochs is already [] for epochs <= 0, so the original's
        # separate `epochs == -1` branch (whose assignments were then
        # unconditionally overwritten) is unnecessary.
        self.layer_ls = [0] * epochs
        self.thick_ls = [0] * epochs
        self.max_ret_ls = [0] * epochs
        self.kwargs = kwargs
        self.current_e = 0

    def store(self, layers, thicknesses, ret, e, append_mode=False):
        """Record a design for epoch *e*, keeping the best return seen.

        In append_mode the design is appended unconditionally instead of
        competing for the slot at index e.
        """
        if append_mode:
            self.layer_ls.append(layers)
            self.thick_ls.append(thicknesses)
            self.max_ret_ls.append(ret)
        elif ret >= self.max_ret_ls[e]:
            self.layer_ls[e] = layers
            self.thick_ls[e] = thicknesses
            self.max_ret_ls[e] = ret

    def save_state(self):
        """Pickle this tracker to output_dir, one file per MPI rank."""
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        filename = os.path.join(self.kwargs['output_dir'],
                                'design_tracker_{}.pkl'.format(rank))
        # use a context manager so the handle is closed promptly
        # (the original leaked the open file object)
        with open(filename, 'wb') as f:
            pkl.dump(self, f)

    def print_progress(self):
        """Render tracked designs as readable strings.

        Stops at the first untouched (0, 0, 0) slot, i.e. the first epoch
        that has not been reached yet.
        """
        progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))
        read_progress = []
        for entry in progress:
            if entry == (0, 0, 0):
                break
            layers, thicks, merit = entry
            desc = '|'.join(l + ' ' + str(d) + ' nm'
                            for l, d in zip(layers, thicks))
            read_progress.append([desc + ', Merit {:.3f}'.format(merit)])
        return read_progress
def print_progress(progress):
    """Format raw (layers, thicknesses, merit) entries into readable form.

    Mutates *progress* in place, replacing each entry with
    ['mat1 d1 nm|mat2 d2 nm|...', merit], and returns the same list.
    Each raw entry is echoed to stdout (with a trailing 0, kept for
    byte-compatible output with the original).
    """
    for idx, entry in enumerate(progress):
        print(entry, 0)
        layers, thicks, merit = entry[0], entry[1], entry[2]
        desc = '|'.join(l + ' ' + str(d) + ' nm'
                        for l, d in zip(layers, thicks))
        progress[idx] = [desc, merit]
    return progress
class TMM_sim():
    """Transfer-matrix (TMM) spectrum simulator for layered structures."""

    def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):
        '''
        This class returns the spectrum given the designed structures.

        Args:
            mats: materials available for the designed layers.
            wavelength: working wavelengths in micrometres.
            substrate: substrate material. 'Glass' and 'Air' are handled
                analytically; any other name needs an nk CSV in DATABASE.
            substrate_thick: substrate thickness in nm.
        '''
        self.mats = mats
        # include substrate nk data unless it is handled analytically
        self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats
        self.wavelength = wavelength
        self.nk_dict = self.load_materials()
        self.substrate = substrate
        self.substrate_thick = substrate_thick

    def load_materials(self):
        '''
        Load material nk and return corresponding interpolators.

        Return:
            nk_dict: dict, key -- material name, value: complex n + ik
            sampled on the self.wavelength grid.
        '''
        nk_dict = {}
        for mat in self.all_mats:
            nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))
            nk.dropna(inplace=True)
            wl = nk['wl'].to_numpy()
            index = (nk['n'] + nk['k'] * 1.j).to_numpy()
            mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))
            # quadratic interpolation of the tabulated n + ik data
            mat_nk_fn = interp1d(
                mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')
            nk_dict[mat] = mat_nk_fn(self.wavelength)
        return nk_dict

    def spectrum(self, materials, thickness, theta=0, plot=False, title=False):
        '''
        Input:
            materials: list of layer material names
            thickness: list of thicknesses in nm with np.inf ambient
                sentinels at both ends. NOTE: mutated in place — the
                substrate thickness is inserted before the trailing
                ambient entry when the substrate is not 'Air'.
            theta: degree, the incidence angle

        Return:
            (R, T, A): numpy arrays over self.wavelength
        '''
        degree = pi/180
        if self.substrate != 'Air':
            thickness.insert(-1, self.substrate_thick)  # substrate thickness
        R, T, A = [], [], []
        for i, lambda_vac in enumerate(self.wavelength * 1e3):
            if self.substrate == 'Glass':
                # glass backing modelled as a lossless n = 1.45 layer
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]
            elif self.substrate == 'Air':
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]
            else:
                n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]
            res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)
            R.append(res['R'])
            T.append(res['T'])
        R, T = np.array(R), np.array(T)
        # energy conservation: anything not reflected or transmitted is absorbed
        A = 1 - R - T
        if plot:
            self.plot_spectrum(R, T, A)
        if title:
            thick = thickness[1:-1]
            title = ' | '.join(['{}nm {}'.format(d, m)
                                for d, m in zip(thick, materials)])
            # BUG FIX: was `self.substrate is not 'Air'` — identity
            # comparison with a str literal (SyntaxWarning); equality is
            # what is intended here.
            if self.substrate != 'Air':
                title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'
            else:
                title = 'Air | ' + title + ' | Air'
            plt.title(title, **{'size': '10'})
        return R, T, A

    def plot_spectrum(self, R, T, A):
        """Plot R/T/A curves (wavelength in nm) with averaged legends."""
        plt.plot(self.wavelength * 1000, R, self.wavelength *
                 1000, T, self.wavelength * 1000, A, linewidth=3)
        plt.ylabel('R/T/A')
        plt.xlabel('Wavelength (nm)')
        plt.legend(['R: Average = {:.2f}%'.
                    format(np.mean(R)*100),
                    'T: Average = {:.2f}%'.
                    format(np.mean(T)*100),
                    'A: Average = {:.2f}%'.
                    format(np.mean(A)*100)])
        plt.grid('on', linestyle='--')
        plt.ylim([0, 1])
# Plotting utils
def visualize_progress(file, x, ax=None, color='b', alpha=1):
    """Plot MaxEpRet (top axis) and AverageEpRet (bottom axis) curves
    from a spinning-up style progress log.

    Args:
        file: path to a tab-separated progress.txt file.
        x: column name used for the x-axis (e.g. 'Epoch').
        ax: optional pair of matplotlib axes; a new 2x1 figure is
            created when None.
        color, alpha: line styling forwarded to seaborn.

    Returns:
        df: the loaded progress DataFrame.
    """
    df = pd.read_csv(file, sep="\t")
    # half-width multiplier for the std-dev band drawn below
    width = 0.5
    # x = 'Time'
    if ax is None:
        fig, ax = plt.subplots(2,1)
    sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)
    # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])
    sns.lineplot(x=x, y='AverageEpRet', data=df,
                 ax=ax[1], color=color, alpha=alpha)
    # shade AverageEpRet +/- (width/2) * StdEpRet on the current axes
    plt.fill_between(df[x],
                     df['AverageEpRet']-width/2*df['StdEpRet'],
                     df['AverageEpRet']+width/2*df['StdEpRet'],
                     alpha=0.3, color=color)
    return df
def combine_tracker(folder):
    '''
    Merge all per-rank design trackers in *folder* into one tracker.

    If a cached merge (design_tracker_merged.pkl) exists it is loaded
    and returned directly. Otherwise every design_tracker_*.pkl is
    loaded and, for each epoch, the design with the highest return
    across ranks is kept. The merge is cached once every epoch slot has
    been filled.

    Returns:
        combined_tracker: DesignTracker with the per-epoch best designs.
    '''
    merged_name = 'design_tracker_merged.pkl'
    merged_path = os.path.join(folder, merged_name)
    if merged_name in os.listdir(folder):
        with open(merged_path, 'rb') as f:
            return pkl.load(f)

    trackers = []
    for file in os.listdir(folder):
        if file.startswith('design_tracker_'):
            with open(os.path.join(folder, file), 'rb') as f:
                trackers.append(pkl.load(f))

    combined_tracker = DesignTracker(len(trackers[0].layer_ls))
    # rank index of the best return for every epoch
    max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)
    for e in range(len(trackers[0].layer_ls)):
        combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]
        combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]
        combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]

    # Only cache once training has filled the last epoch slot.
    if combined_tracker.layer_ls[-1] != 0:
        # BUG FIX: the original called os.path.join(folder, tracker_file)
        # where tracker_file was already joined with folder, writing the
        # cache to folder/folder/design_tracker_merged.pkl. It also
        # leaked the open file handles (no `with`).
        with open(merged_path, 'wb') as f:
            pkl.dump(combined_tracker, f)
    return combined_tracker
def summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):
    """Overlay training curves for several experiments and print each
    one's best design discovered so far.

    Args:
        exp_ls: experiment names under ../spinningup/data/.
        seed_ls: one seed per experiment (folder suffix '_s{seed}').
        color, alpha: per-experiment line styling, zipped with exp_ls.
        x: progress column used as the x-axis.

    Returns:
        progress_ls: per-experiment readable progress lists from the
        merged design trackers.
    """
    root = '../spinningup/data/'
    progress_ls = []
    max_ret_ls = []
    params = {'size':14}
    matplotlib.rc('font', **params)
    fig, ax = plt.subplots(2,1, figsize=(10,8))
    for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):
        folder = os.path.join(root, exp, exp+'_s{}'.format(seed))
        progress_file = os.path.join(folder, 'progress.txt')
        df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)
        tracker = combine_tracker(folder)
        progress = tracker.print_progress()
        # report the design at the epoch with the highest recorded merit
        print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))
        progress_ls.append(progress)
        max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))
    ax[0].legend(max_ret_ls)
    ax[1].legend(exp_ls)
    plt.show()
    return progress_ls
def load_exp_res(folder):
    """Load every run under *folder* into one combined DataFrame.

    Each run row carries its progress metrics, the flattened
    hyperparameters from config.json, and the best design found.

    Args:
        folder: experiment root; layout folder/<exp>/<run>/progress.txt.

    Returns:
        pd.DataFrame with one row per logged epoch across all runs.
    """
    subfolders = glob.glob(folder + '/*')

    def read_hyper(file_name, rep=10):
        """Flatten a run's config.json into a DataFrame of *rep* identical
        rows (one per progress row), skipping logger config."""
        with open(os.path.join(file_name, 'config.json')) as f:
            hypers = json.load(f)
        hypers_dict = {}
        for k, v in hypers.items():
            if k.startswith('logger'):
                continue  # logger settings are not hyperparameters
            elif isinstance(v, dict):
                # flatten one level of nesting into 'outer_inner' columns;
                # list values are represented by their first element
                for kk, vv in v.items():
                    val = vv[0] if isinstance(vv, list) else vv
                    hypers_dict[str(k) + '_' + str(kk)] = [val] * rep
            else:
                hypers_dict[k] = [v] * rep
        return pd.DataFrame(hypers_dict)

    # Collect one frame per run and concatenate once at the end; the
    # original duplicated this logic across a first/subsequent split and
    # computed an unused num_epochs from the first run.
    frames = []
    for subfolder in tqdm(subfolders):
        for run in glob.glob(subfolder + '/*'):
            tracker = combine_tracker(run)
            progress = tracker.print_progress()
            best_design = progress[np.argmax(tracker.max_ret_ls)]
            run_df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\t')
            hyper_df = read_hyper(run, rep=len(run_df))
            best_designs_df = pd.DataFrame(
                [{'best_design': best_design}] * len(run_df))
            frames.append(pd.concat([run_df, hyper_df, best_designs_df], axis=1))
    return pd.concat(frames, axis=0)
def finetune(simulator, m0, x0, target, display=False, bounds=None):
    '''
    Finetune the structure using quasi-Newton's method.

    Args:
        simulator: TMM_sim-like object exposing spectrum().
        m0: materials list given by the upstream RL
        x0: thicknesses given by the upstream RL
        target: dict of target spectra passed to cal_reward.
        display: if true, then plot the spectrum before and after the finetuning.
        bounds: per-layer (low, high) thickness bounds; defaults to
            (15, 200) nm for every layer.

    Returns:
        (x_opt, res): finetuned integer thickness list and the full
        scipy OptimizeResult.
    '''
    def objective_func(x):
        # semi-infinite ambient layers bracket the candidate thicknesses
        R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])
        return 1 - cal_reward(R, T, A, target)

    if bounds is None:
        bounds = [(15, 200)] * len(x0)

    res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})
    x_opt = [int(thick) for thick in res.x]

    if display:
        # before/after comparison plots
        plt.figure()
        simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)
        plt.figure()
        simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True, plot=True)
    return x_opt, res
|
flexible
|
{
"blob_id": "f23bc0c277967d8e7a94a49c5a81ed5fb75d36cc",
"index": 9327,
"step-1": "<mask token>\n\n\nclass Memory:\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n 
self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = 
{:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\n<mask token>\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], 
progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\n<mask token>\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n 
T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef 
summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\n<mask token>\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = 
[0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n 
nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 
1])\n\n\n<mask token>\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for 
item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = 
minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n",
"step-4": "<mask token>\n\n\nclass Memory:\n\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n \"\"\"\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. thickness: list of lists\n \"\"\"\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n return R, T, A\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args) for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n \"\"\"\n Merges consecutive layers with the same material types.\n \"\"\"\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n \"\"\"\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n \"\"\"\n\n def threshold(value):\n \"\"\"\n\n \"\"\"\n names = [materials[item] for item in categories]\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(min(max(15, int(value * max_value // 2)),\n max_value))\n elif name in METALS:\n thickness.append(min(max(5, int(value * max_value // 2)),\n max_value))\n 
elif name in INSULATORS:\n thickness.append(min(max(1, int(value * max_value // 2)),\n max_value))\n else:\n raise ValueError('Material not known')\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\n\nclass DesignTracker:\n\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n elif ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'],\n 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n\n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0, 0, 0):\n break\n read_progress.append(['|'.join([(l + ' ' + str(d) + ' nm') for \n l, d in zip(progress[i][0], progress[i][1])]) +\n ', Merit {:.3f}'.format(progress[i][2])])\n return read_progress\n\n\ndef print_progress(progress):\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([(l + ' ' + str(d) + ' nm') for l, d in zip\n (progress[i][0], progress[i][1])]), progress[i][2]]\n return progress\n\n\nclass TMM_sim:\n\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01),\n substrate='Cr', substrate_thick=500):\n \"\"\"\n This class returns the spectrum given the designed structures.\n \"\"\"\n self.mats = mats\n self.all_mats = mats + [substrate] if substrate not in 
['Glass', 'Air'\n ] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n \"\"\"\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the \n self.wavelength range\n \"\"\"\n nk_dict = {}\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.0j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n mat_nk_fn = interp1d(mat_nk_data[:, 0].real, mat_nk_data[:, 1],\n kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n \"\"\"\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n \"\"\"\n degree = pi / 180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick)\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1000.0):\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n 1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1\n ]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [\n self.nk_dict[self.substrate][i], 1]\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m) for d, m in zip(\n thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.\n substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | 
Air'\n plt.title(title, **{'size': '10'})\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n plt.plot(self.wavelength * 1000, R, self.wavelength * 1000, T, self\n .wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.format(np.mean(R) * 100),\n 'T: Average = {:.2f}%'.format(np.mean(T) * 100),\n 'A: Average = {:.2f}%'.format(np.mean(A) * 100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep='\\t')\n width = 0.5\n if ax is None:\n fig, ax = plt.subplots(2, 1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha\n )\n sns.lineplot(x=x, y='AverageEpRet', data=df, ax=ax[1], color=color,\n alpha=alpha)\n plt.fill_between(df[x], df['AverageEpRet'] - width / 2 * df['StdEpRet'],\n df['AverageEpRet'] + width / 2 * df['StdEpRet'], alpha=0.3, color=color\n )\n return df\n\n\ndef combine_tracker(folder):\n \"\"\"\n Merge all buffers\n \"\"\"\n trackers = []\n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb')))\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in\n trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, 
tracker_file),\n 'wb'))\n return combined_tracker\n\n\ndef summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n params = {'size': 14}\n matplotlib.rc('font', **params)\n fig, ax = plt.subplots(2, 1, figsize=(10, 8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp + '_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.\n argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder + '/*')]\n\n def read_hyper(file_name, rep=10):\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k) + '_' + str(kk)] = [vv[0]] * rep\n else:\n hypers_dict[str(k) + '_' + str(kk)] = [vv] * rep\n else:\n hypers_dict[k] = [v] * rep\n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df\n first = True\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder + '/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),\n sep='\\t'))\n for run in runs:\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * 
len(df))\n df = pd.concat([df, hyper_df, best_designs_df], axis=1)\n first = False\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'), sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design': best_design\n }] * len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0)\n return df\n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n \"\"\"\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n \"\"\"\n\n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf] + list(x) + [np.inf])\n return 1 - cal_reward(R, T, A, target)\n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n res = minimize(objective_func, x0, bounds=bounds, options={'disp': True})\n x_opt = [int(item) for item in res.x]\n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x0 + [np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf] + x_opt + [np.inf], title=True,\n plot=True)\n return x_opt, res\n",
"step-5": "from mpi4py import MPI\nimport matplotlib\nfrom tmm import coh_tmm\nimport pandas as pd\nimport os\nfrom numpy import pi\nfrom scipy.interpolate import interp1d\nfrom joblib import Parallel, delayed\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport pickle as pkl\nimport seaborn as sns\nfrom scipy.optimize import minimize\nimport json\nfrom tqdm import tqdm\n\nDATABASE = './data'\nINSULATORS = ['HfO2', 'SiO2', 'SiC', 'Al2O3', 'MgF2', 'TiO2', 'Fe2O3', 'MgF2', 'Si3N4', 'TiN', 'ZnO', 'ZnS', 'ZnSe']\nMETALS = ['Ag', 'Al', 'Cr', 'Ge', 'Si', 'Ni']\n\nnum_workers = 8\n\ndef cal_reward(R, T, A, target):\n '''\n Calculate reward based on given spectrums. \n We calculate the reward using averaged (1-mse).\n\n Args:\n R, T, A: numpy array. Reflection, transmission, and \n absorption spectrums, respectively.\n target: dict. {'R':np.array, 'T':np.array, 'A':np.array}\n\n Returns:\n reward: float. Reward for the spectrum. \n '''\n\n reward = 0\n for k, v in target.items():\n\n if k == 'R':\n res = R\n elif k == 'T':\n res = T\n else:\n res = A\n \n reward += 1 - np.abs(res.squeeze() - v).mean()\n\n reward /= len(target)\n\n return reward\n\n\nclass Memory:\n def __init__(self):\n self.actions = []\n self.states = []\n self.logprobs = []\n self.rewards = []\n self.is_terminals = []\n\n def clear_memory(self):\n del self.actions[:]\n del self.states[:]\n del self.logprobs[:]\n del self.rewards[:]\n del self.is_terminals[:]\n\n\ndef batch_spectrum(env, names_list, thickness_list):\n\n def spectrum(args):\n '''\n Inputs: \n 1. names: list of lists, each list correspond to the structures\n 2. 
thickness: list of lists\n '''\n names, thickness = args\n R, T, A = env.spectrum(names, thickness, 0, False)\n\n return R, T, A\n\n res = Parallel(n_jobs=num_workers)(delayed(spectrum)(args)\n for args in\n zip(names_list, thickness_list))\n res = np.array(res)\n Rs, Ts, As = res[:, 0, :], res[:, 1, :], res[:, 2, :]\n\n return Rs, Ts, As\n\n\ndef merge_layers(categories, thicknesses):\n '''\n Merges consecutive layers with the same material types.\n '''\n\n thicknesses = thicknesses[1:-1]\n c_output = [categories[0]]\n t_output = [thicknesses[0]]\n for i, (c, d) in enumerate(zip(categories[1:], thicknesses[1:])):\n\n if c == c_output[-1]:\n t_output[-1] += d\n continue\n else:\n c_output.append(c)\n t_output.append(d)\n\n t_output.insert(0, np.inf)\n t_output.insert(len(t_output), np.inf)\n\n return c_output, t_output\n\n\ndef get_structure(categories, values, materials, ds, continuous=False,\n max_value=400):\n '''\n Given categories and values, return the strucure in the form \n (name (str), thickness (nm))\n '''\n\n def threshold(value):\n '''\n\n '''\n\n names = [materials[item] for item in categories]\n\n if not continuous:\n thickness = [np.inf] + [ds[item] for item in values] + [np.inf]\n else:\n thickness = []\n for category, value in zip(categories, values):\n name = materials[category]\n if name == 'Ag':\n thickness.append(\n min(max(15, int(value * max_value//2)), max_value))\n elif name in METALS:\n thickness.append(\n min(max(5, int(value * max_value//2)), max_value))\n elif name in INSULATORS:\n thickness.append(\n min(max(1, int(value * max_value//2)), max_value))\n else:\n raise ValueError('Material not known')\n # thickness = [np.inf] + [min(max(5, int(item * 2e2)), 200) for i,\n # item in enumerate(values)] + [np.inf]\n thickness = [np.inf] + thickness + [np.inf]\n return names, thickness\n\nclass DesignTracker():\n def __init__(self, epochs, **kwargs):\n \"\"\"\n This class tracks the best designs discovered.\n \"\"\"\n if epochs == -1:\n 
self.layer_ls = []\n self.thick_ls = []\n self.max_ret_ls = []\n self.layer_ls = [0] * epochs\n self.thick_ls = [0] * epochs\n self.max_ret_ls = [0] * epochs\n self.kwargs = kwargs\n self.current_e = 0\n\n def store(self, layers, thicknesses, ret, e, append_mode=False):\n \n if append_mode:\n self.layer_ls.append(layers)\n self.thick_ls.append(thicknesses)\n self.max_ret_ls.append(ret)\n\n else:\n if ret >= self.max_ret_ls[e]:\n self.layer_ls[e] = layers\n self.thick_ls[e] = thicknesses\n self.max_ret_ls[e] = ret\n\n def save_state(self):\n # save buffer from all processes\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n filename = os.path.join(self.kwargs['output_dir'], 'design_tracker_{}.pkl'.format(rank))\n pkl.dump(self, open(filename, 'wb'))\n \n def print_progress(self):\n progress = list(zip(self.layer_ls, self.thick_ls, self.max_ret_ls))\n read_progress = []\n for i in range(len(progress)):\n if progress[i] == (0,0,0):\n break\n read_progress.append(['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]) + ', Merit {:.3f}'.format(progress[i][2])])\n\n return read_progress\n\ndef print_progress(progress):\n\n for i in range(len(progress)):\n print(progress[i], 0)\n progress[i] = ['|'.join([l + ' ' + str(d) + ' nm' for l, d in zip(progress[i][0], progress[i][1])]), progress[i][2]]\n\n return progress\n\nclass TMM_sim():\n def __init__(self, mats=['Ge'], wavelength=np.arange(0.38, 0.805, 0.01), substrate='Cr', substrate_thick=500):\n '''\n This class returns the spectrum given the designed structures.\n '''\n self.mats = mats\n # include substrate\n self.all_mats = mats + [substrate] if substrate not in ['Glass', 'Air'] else mats\n self.wavelength = wavelength\n self.nk_dict = self.load_materials()\n self.substrate = substrate\n self.substrate_thick = substrate_thick\n\n def load_materials(self):\n '''\n Load material nk and return corresponding interpolators.\n\n Return:\n nk_dict: dict, key -- material name, value: n, k in the 
\n self.wavelength range\n '''\n nk_dict = {}\n\n for mat in self.all_mats:\n nk = pd.read_csv(os.path.join(DATABASE, mat + '.csv'))\n nk.dropna(inplace=True)\n wl = nk['wl'].to_numpy()\n index = (nk['n'] + nk['k'] * 1.j).to_numpy()\n mat_nk_data = np.hstack((wl[:, np.newaxis], index[:, np.newaxis]))\n\n\n mat_nk_fn = interp1d(\n mat_nk_data[:, 0].real, mat_nk_data[:, 1], kind='quadratic')\n nk_dict[mat] = mat_nk_fn(self.wavelength)\n\n return nk_dict\n\n def spectrum(self, materials, thickness, theta=0, plot=False, title=False):\n '''\n Input:\n materials: list\n thickness: list\n theta: degree, the incidence angle\n\n Return:\n s: array, spectrum\n '''\n degree = pi/180\n if self.substrate != 'Air':\n thickness.insert(-1, self.substrate_thick) # substrate thickness\n\n R, T, A = [], [], []\n for i, lambda_vac in enumerate(self.wavelength * 1e3):\n\n # we assume the last layer is glass\n if self.substrate == 'Glass':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1.45, 1]\n elif self.substrate == 'Air':\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [1]\n else:\n n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict[self.substrate][i], 1]\n\n # n_list = [1] + [self.nk_dict[mat][i] for mat in materials] + [self.nk_dict['Cr'][i]]\n\n # mport pdb; pdb.set_trace()\n res = coh_tmm('s', n_list, thickness, theta * degree, lambda_vac)\n R.append(res['R'])\n T.append(res['T'])\n\n R, T = np.array(R), np.array(T)\n A = 1 - R - T\n\n if plot:\n self.plot_spectrum(R, T, A)\n if title:\n thick = thickness[1:-1]\n title = ' | '.join(['{}nm {}'.format(d, m)\n for d, m in zip(thick, materials)])\n if self.substrate is not 'Air':\n title = 'Air | ' + title + ' | {}nm {} '.format(self.substrate_thick, self.substrate) + '| Air'\n else:\n title = 'Air | ' + title + ' | Air'\n plt.title(title, **{'size': '10'})\n\n return R, T, A\n\n def plot_spectrum(self, R, T, A):\n\n plt.plot(self.wavelength * 1000, R, self.wavelength *\n 1000, 
T, self.wavelength * 1000, A, linewidth=3)\n plt.ylabel('R/T/A')\n plt.xlabel('Wavelength (nm)')\n plt.legend(['R: Average = {:.2f}%'.\n format(np.mean(R)*100),\n 'T: Average = {:.2f}%'.\n format(np.mean(T)*100),\n 'A: Average = {:.2f}%'.\n format(np.mean(A)*100)])\n plt.grid('on', linestyle='--')\n plt.ylim([0, 1])\n\n\n# Plotting utils\ndef visualize_progress(file, x, ax=None, color='b', alpha=1):\n df = pd.read_csv(file, sep=\"\\t\")\n width = 0.5\n # x = 'Time'\n if ax is None:\n fig, ax = plt.subplots(2,1)\n sns.lineplot(x=x, y='MaxEpRet', data=df, ax=ax[0], color=color, alpha=alpha)\n # ax[0].legend(['Max {}'.format(np.max(df['MaxEpRet']))])\n sns.lineplot(x=x, y='AverageEpRet', data=df,\n ax=ax[1], color=color, alpha=alpha)\n plt.fill_between(df[x],\n df['AverageEpRet']-width/2*df['StdEpRet'],\n df['AverageEpRet']+width/2*df['StdEpRet'],\n alpha=0.3, color=color)\n\n return df\n\ndef combine_tracker(folder):\n '''\n Merge all buffers\n '''\n trackers = []\n \n if 'design_tracker_merged.pkl' in os.listdir(folder):\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n combined_tracker = pkl.load(open(tracker_file, 'rb'))\n return combined_tracker\n\n for file in os.listdir(folder):\n if file.startswith('design_tracker_'):\n tracker_file = os.path.join(folder, file)\n trackers.append(pkl.load(open(tracker_file, 'rb'))) \n\n combined_tracker = DesignTracker(len(trackers[0].layer_ls))\n max_idx = np.argmax(np.array([tracker.max_ret_ls for tracker in trackers]), axis=0)\n for e in range(len(trackers[0].layer_ls)):\n combined_tracker.layer_ls[e] = trackers[max_idx[e]].layer_ls[e]\n combined_tracker.thick_ls[e] = trackers[max_idx[e]].thick_ls[e]\n combined_tracker.max_ret_ls[e] = trackers[max_idx[e]].max_ret_ls[e]\n \n if combined_tracker.layer_ls[-1] != 0:\n tracker_file = os.path.join(folder, 'design_tracker_merged.pkl')\n pkl.dump(combined_tracker, open(os.path.join(folder, tracker_file), 'wb'))\n\n return combined_tracker\n\ndef 
summarize_res(exp_ls, seed_ls, color, alpha, x='Epoch'):\n \n root = '../spinningup/data/'\n progress_ls = []\n max_ret_ls = []\n\n params = {'size':14}\n matplotlib.rc('font', **params)\n\n fig, ax = plt.subplots(2,1, figsize=(10,8))\n for a, c, exp, seed in zip(alpha, color, exp_ls, seed_ls):\n folder = os.path.join(root, exp, exp+'_s{}'.format(seed))\n progress_file = os.path.join(folder, 'progress.txt')\n df = visualize_progress(progress_file, x=x, ax=ax, color=c, alpha=a)\n\n tracker = combine_tracker(folder)\n progress = tracker.print_progress()\n print('{}, Best discovered so far {}'.format(exp, progress[np.argmax(tracker.max_ret_ls)]))\n progress_ls.append(progress)\n max_ret_ls.append('Max merit {:.3f}'.format(np.max(df['MaxEpRet'])))\n\n ax[0].legend(max_ret_ls)\n ax[1].legend(exp_ls)\n plt.show()\n return progress_ls\n\ndef load_exp_res(folder):\n subfolders = [item for item in glob.glob(folder+'/*')]\n\n def read_hyper(file_name, rep=10):\n\n with open(os.path.join(file_name, 'config.json')) as f:\n hypers = json.load(f)\n hypers_dict = {}\n for k, v in hypers.items():\n if k.startswith('logger'):\n continue\n elif isinstance(v, dict):\n for kk, vv in v.items():\n if isinstance(vv, list):\n hypers_dict[str(k)+'_'+str(kk)] = [vv[0]]*rep\n else:\n hypers_dict[str(k)+'_'+str(kk)] = [vv]*rep\n else: \n hypers_dict[k] = [v] * rep\n \n hyper_df = pd.DataFrame(hypers_dict)\n return hyper_df \n\n first=True # first pandas file to load\n for subfolder in tqdm(subfolders):\n runs = glob.glob(subfolder+'/*')\n num_epochs = len(pd.read_csv(os.path.join(runs[0], 'progress.txt'),sep='\\t'))\n for run in runs:\n\n tracker = combine_tracker(run)\n progress = tracker.print_progress()\n best_design = progress[np.argmax(tracker.max_ret_ls)]\n\n if first:\n df = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df))\n df = pd.concat([df, hyper_df, 
best_designs_df], axis=1)\n first = False\n\n else:\n df_ = pd.read_csv(os.path.join(run, 'progress.txt'),sep='\\t')\n hyper_df = read_hyper(run, rep=len(df_))\n best_designs_df = pd.DataFrame([{'best_design':best_design}]*len(df_))\n df_ = pd.concat([df_, hyper_df, best_designs_df], axis=1)\n df = pd.concat([df, df_], axis=0) \n\n return df \n\n\ndef finetune(simulator, m0, x0, target, display=False, bounds=None):\n '''\n Finetune the structure using quasi-Newton's method.\n \n Args:\n m0: materials list given by the upstream RL\n x0: thicknesses given by the upstream RL\n display: if true, then plot the spectrum before and after the finetuning.\n \n Returns:\n x_opt: finetuned thickness list\n '''\n \n def objective_func(x):\n R, T, A = simulator.spectrum(m0, [np.inf]+list(x)+[np.inf])\n return 1-cal_reward(R, T, A, target)\n \n if bounds is None:\n bounds = [(15, 200)] * len(x0)\n \n res = minimize(objective_func, x0, bounds=bounds, options={'disp':True})\n x_opt = [int(item) for item in res.x]\n \n if display:\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x0+[np.inf], title=True, plot=True)\n plt.figure()\n simulator.spectrum(m0, [np.inf]+x_opt+[np.inf], title=True, plot=True)\n \n return x_opt, res\n",
"step-ids": [
11,
16,
20,
22,
26
]
}
|
[
11,
16,
20,
22,
26
] |
<|reserved_special_token_0|>
class Ticket(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
<|reserved_special_token_0|>
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Device(models.Model):
TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (
'30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (
'70', 'Маршрутизатор'), ('80', 'Модем')
type = models.CharField(max_length=3, choices=TYPE_CHOICES)
inv_no = models.CharField(max_length=40)
ip = models.IPAddressField(blank=True, null=True)
model = models.CharField(max_length=60, blank=True, null=True)
mac = custom_fields.MACAddressField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
place = models.ForeignKey('Place')
hostname = models.CharField(blank=True, null=True, max_length=40)
def type_display(self):
for desc in self.TYPE_CHOICES:
if desc[0] == self.type:
return desc[1]
def get_absolute_url(self):
return '/place/' + str(self.place.id)
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Message(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class User(models.Model):
name = models.CharField('Имя', max_length=60)
email = models.EmailField(blank=True, null=True)
phone = models.CharField('Внутр. телефон', max_length=30, blank=True,
null=True)
mobile = models.CharField('Корп. мобильный', max_length=30, blank=True,
null=True)
city_phone = models.CharField('Городской телефон', max_length=30, blank
=True, null=True)
sat_phone = models.CharField('Спутниковый телефон', max_length=30,
blank=True, null=True)
personal_phone = models.CharField('Личный телефон', max_length=30,
blank=True, null=True)
admin = models.BooleanField(default=False)
login = models.CharField(max_length=16, blank=True, null=True)
password = models.CharField(max_length=32, blank=True, null=True)
place = models.ForeignKey('Place', blank=True, null=True)
class Device(models.Model):
TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (
'30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (
'70', 'Маршрутизатор'), ('80', 'Модем')
type = models.CharField(max_length=3, choices=TYPE_CHOICES)
inv_no = models.CharField(max_length=40)
ip = models.IPAddressField(blank=True, null=True)
model = models.CharField(max_length=60, blank=True, null=True)
mac = custom_fields.MACAddressField(blank=True, null=True)
info = models.TextField(blank=True, null=True)
place = models.ForeignKey('Place')
hostname = models.CharField(blank=True, null=True, max_length=40)
def type_display(self):
for desc in self.TYPE_CHOICES:
if desc[0] == self.type:
return desc[1]
def get_absolute_url(self):
return '/place/' + str(self.place.id)
class Ticket(models.Model):
STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'
), ('30', 'Закрыто'), ('40', 'Удалено')
PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'
), ('30', 'Длительное')
CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',
'Печать, принтеры, расходники'), ('20',
'Корпоративные системы (SAP,АСУД ..)'), ('30',
'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')
status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)
priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES
)
category = models.CharField('Категория', max_length=3, choices=
CATEGORY_CHOICES, blank=True, null=True)
hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,
decimal_places=1, default=2)
description = models.TextField('Описание проблемы')
resume = models.TextField('Отчёт о решении', blank=True, null=True)
user = models.ForeignKey(User, related_name='tickets')
admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True
)
device = models.ForeignKey(Device, blank=True, null=True)
ctime = models.DateTimeField(auto_now_add=True)
closing_time = models.DateTimeField(blank=True, null=True)
def get_short_text(self):
return self.description[:120]
def hours_from_now(self):
delta = datetime.datetime.now() - self.ctime
return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)
def is_new(self, *args):
value = self.status
if args:
value = args[0]
if value == '00':
return True
else:
return False
def is_closed(self, *args):
value = self.status
if args:
value = args[0]
if value == '30':
return True
else:
return False
def accept_by(self, user):
self.admin = user
def no(self):
return '{0:0>5}'.format(self.id)
class Place(models.Model):
name = models.CharField(max_length=60)
parent = models.ForeignKey('self', null=True, blank=True)
address = models.CharField(max_length=70)
LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (
3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,
'Место/комплекс')
def childs(self):
return Place.objects.filter(parent=self)
def get_level(self):
res = 0
try:
if self.parent != None:
o = self
while o.parent != None:
res += 1
o = o.parent
except:
None
return res
def level_display(self):
level = self.get_level()
for desc in self.LEVEL_DESC:
if desc[0] == level:
return desc[1]
def path(self):
path = []
o = self
while o.parent != None:
path.insert(0, o)
o = o.parent
path.insert(0, o)
return path
def get_absolute_url(self):
return '/place/' + str(self.id)
def __unicode__(self):
return self.name
def users(self):
return User.objects.filter(place=self)
class Document(models.Model):
name = models.CharField(max_length=60)
place = models.ForeignKey(Place, blank=True, null=True)
def latest_file(self):
return DocFile.objects.filter(document=self).order_by('-id')[0]
class DocFile(models.Model):
document = models.ForeignKey(Document)
version = models.IntegerField()
file_name = models.CharField(max_length=60)
comment = models.CharField(max_length=90, blank=True, null=True)
ctime = models.DateTimeField()
user = models.ForeignKey(User)
<|reserved_special_token_1|>
# -*- coding: utf8 -*-
from django.db import models
import custom_fields
import datetime
#import mptt
# Create your models here.
class Message(models.Model):
    """One message in a ticket's conversation thread (user or support reply)."""
    user = models.ForeignKey('User')
    # NOTE(review): auto_now together with auto_now_add is rejected by newer
    # Django versions -- confirm which timestamp semantics were intended.
    time = models.DateTimeField(auto_now=True,auto_now_add=True)
    text = models.TextField()
    # True if this message is a reply from the support staff
    reply = models.BooleanField(default=False)
    ticket = models.ForeignKey('Ticket')
    # IP address the message was posted from, if known
    ip = models.IPAddressField(blank=True,null=True)
class User(models.Model):
    """An employee who can file tickets (custom model, not django.contrib.auth)."""
    name = models.CharField("Имя",max_length=60)
    email = models.EmailField(blank=True,null=True)
    phone = models.CharField("Внутр. телефон",max_length=30,blank=True,null=True)
    mobile = models.CharField("Корп. мобильный",max_length=30,blank=True,null=True)
    city_phone = models.CharField("Городской телефон",max_length=30,blank=True,null=True)
    sat_phone = models.CharField("Спутниковый телефон",max_length=30,blank=True,null=True)
    personal_phone = models.CharField("Личный телефон",max_length=30,blank=True,null=True)
    # True if this user is a helpdesk administrator
    admin = models.BooleanField(default=False)
    login = models.CharField(max_length=16,blank=True,null=True)
    # NOTE(review): 32 chars suggests an MD5-style hash is stored -- confirm
    password = models.CharField(max_length=32,blank=True,null=True)
    # workplace of the user; optional
    place = models.ForeignKey('Place',blank=True,null=True)
class Device(models.Model):
    """A piece of IT equipment (computer, printer, network gear, ...)
    registered at a concrete Place."""
    TYPE_CHOICES = (
        ('00', 'Компьютер'),
        ('10', 'Монитор'),
        ('20', 'Принтер'),
        ('30', 'МФУ'),
        ('40', 'Плоттер'),
        ('50', 'Сканер'),
        ('60', 'Сервер'),
        ('70', 'Маршрутизатор'),
        ('80', 'Модем'),
    )
    type = models.CharField(max_length=3, choices=TYPE_CHOICES)
    # inventory (accounting) number
    inv_no = models.CharField(max_length=40)
    ip = models.IPAddressField(blank=True, null=True)
    model = models.CharField(max_length=60, blank=True, null=True)
    mac = custom_fields.MACAddressField(blank=True, null=True)
    # free-form notes about the device
    info = models.TextField(blank=True, null=True)
    place = models.ForeignKey('Place')
    hostname = models.CharField(blank=True, null=True, max_length=40)

    def type_display(self):
        """Return the human-readable name for ``self.type``.

        Returns None for an unknown code (same as the previous linear
        scan falling through).  Dict lookup avoids the hand-rolled loop.
        """
        return dict(self.TYPE_CHOICES).get(self.type)

    def get_absolute_url(self):
        """Devices are shown on their place's page, not a page of their own."""
        return "/place/" + str(self.place.id)
class Ticket(models.Model):
    """A helpdesk request.

    Lifecycle (status codes): new -> accepted -> waiting -> closed/deleted.
    """
    # NEW, OPEN, CLOSED, DELETED
    STATUS_CHOICES = (
        ('00', 'Новое'),
        ('10', 'Принято'),
        ('20', 'Ожидаем ответ'),
        ('30', 'Закрыто'),
        ('40', 'Удалено'),
    )
    PRIO_CHOICES = (
        ('00', 'Крайне срочно'),
        ('10', 'Срочно'),
        ('20', 'Обычно'),
        ('30', 'Длительное'),
    )
    CATEGORY_CHOICES = (
        ('00', 'Компьютеры, локальный софт, железо'),
        ('10', 'Печать, принтеры, расходники'),
        ('20', 'Корпоративные системы (SAP,АСУД ..)'),
        ('30', 'Сетевые сервисы и оборуд., Серверы'),
        ('40', 'СКС (провода, розетки)'),
    )
    status = models.CharField("Статус", max_length=3, choices=STATUS_CHOICES)
    priority = models.CharField("Приоритет", max_length=3, choices=PRIO_CHOICES)
    category = models.CharField("Категория", max_length=3, choices=CATEGORY_CHOICES, blank=True, null=True)
    hours_limit = models.DecimalField("Лимит времени, ч.", max_digits=4, decimal_places=1, default=2)
    # Problem description: initialised from the first message when the ticket
    # is created and may be edited while the ticket is being worked on.
    description = models.TextField("Описание проблемы")
    # Resolution report, filled in when the ticket is closed.
    resume = models.TextField("Отчёт о решении", blank=True, null=True)
    user = models.ForeignKey(User, related_name="tickets")
    # staff member responsible for the ticket; unset until accepted
    admin = models.ForeignKey(User, related_name="tasks", blank=True, null=True)
    device = models.ForeignKey(Device, blank=True, null=True)
    # creation time, set automatically
    ctime = models.DateTimeField(auto_now_add=True)
    closing_time = models.DateTimeField(blank=True, null=True)

    def get_short_text(self):
        """First 120 characters of the description, for list views."""
        return self.description[:120]

    def hours_from_now(self):
        """Hours elapsed since creation, rounded to 0.1 h.

        NOTE(review): uses naive ``datetime.now()``; assumes ``ctime`` is
        stored naive in the same timezone -- confirm against settings.
        """
        delta = datetime.datetime.now() - self.ctime
        return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)

    def is_new(self, *args):
        """True if the given status code (default: self.status) is 'new'."""
        value = args[0] if args else self.status
        return value == '00'

    def is_closed(self, *args):
        """True if the given status code (default: self.status) is 'closed'."""
        value = args[0] if args else self.status
        return value == '30'

    def accept_by(self, user):
        """Assign *user* as the responsible admin (does not save the model)."""
        self.admin = user

    def no(self):
        """Zero-padded five-digit ticket number for display."""
        return '{0:0>5}'.format(self.id)
class Place(models.Model):
    """A node in the location hierarchy, from city down to a workplace."""
    name = models.CharField(max_length=60)
    # None for the hierarchy root
    parent = models.ForeignKey('self', null=True, blank=True)
    address = models.CharField(max_length=70)
    # depth (see get_level) -> human-readable level name
    LEVEL_DESC = (
        (1, "Населённый пункт"),
        (2, "Территория, группа зданий"),
        (3, "Здание"),
        (4, "Этаж"),
        (5, "Кабинет/помещение"),
        (6, "Место/комплекс"),
    )

    def childs(self):
        """Direct children of this place."""
        return Place.objects.filter(parent=self)

    def get_level(self):
        """Depth of this place in the hierarchy (a root has level 0)."""
        res = 0
        try:
            o = self
            while o.parent is not None:
                res += 1
                o = o.parent
        except Exception:
            # Accessing .parent can raise (e.g. a dangling FK); report the
            # depth reached so far instead of propagating.  The original
            # used a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt -- narrowed to Exception.
            pass
        return res

    def level_display(self):
        """Human-readable name of this place's level, or None if unmapped."""
        return dict(self.LEVEL_DESC).get(self.get_level())

    def path(self):
        """Chain of places from the root down to (and including) self."""
        path = []
        o = self
        while o.parent is not None:
            path.insert(0, o)
            o = o.parent
        path.insert(0, o)
        return path

    def get_absolute_url(self):
        return '/place/' + str(self.id)

    def __unicode__(self):
        return self.name

    def users(self):
        """Users whose workplace is this place."""
        return User.objects.filter(place=self)
#mptt.register(Place)
class Document(models.Model):
    """A named document; its stored revisions live in DocFile rows."""
    name=models.CharField(max_length=60)
    place=models.ForeignKey(Place,blank=True,null=True)

    def latest_file(self):
        """Return the newest DocFile revision of this document."""
        revisions = DocFile.objects.filter(document=self)
        newest_first = revisions.order_by('-id')
        return newest_first[0]
class DocFile(models.Model):
    # One stored revision of a Document.
    document=models.ForeignKey(Document)
    # Revision number of the file within its document.
    version=models.IntegerField()
    file_name=models.CharField(max_length=60)
    comment=models.CharField(max_length=90,blank=True,null=True)
    # Upload time; set explicitly by the caller (not auto_now_add).
    ctime = models.DateTimeField()
    # The user who uploaded this revision.
    user = models.ForeignKey(User)
|
flexible
|
{
"blob_id": "64fd597918fe8133d53d1df741512cd2e49a111d",
"index": 1252,
"step-1": "<mask token>\n\n\nclass Ticket(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_short_text(self):\n return self.description[:120]\n\n def hours_from_now(self):\n delta = datetime.datetime.now() - self.ctime\n return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)\n\n def is_new(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '00':\n return True\n else:\n return False\n <mask token>\n\n def accept_by(self, user):\n self.admin = user\n\n def no(self):\n return '{0:0>5}'.format(self.id)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=60)\n parent = models.ForeignKey('self', null=True, blank=True)\n address = models.CharField(max_length=70)\n LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (\n 3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,\n 'Место/комплекс')\n\n def childs(self):\n return Place.objects.filter(parent=self)\n\n def get_level(self):\n res = 0\n try:\n if self.parent != None:\n o = self\n while o.parent != None:\n res += 1\n o = o.parent\n except:\n None\n return res\n\n def level_display(self):\n level = self.get_level()\n for desc in self.LEVEL_DESC:\n if desc[0] == level:\n return desc[1]\n\n def path(self):\n path = []\n o = self\n while o.parent != None:\n path.insert(0, o)\n o = o.parent\n path.insert(0, o)\n return path\n\n def get_absolute_url(self):\n return '/place/' + str(self.id)\n\n def __unicode__(self):\n return self.name\n\n def users(self):\n return User.objects.filter(place=self)\n\n\nclass Document(models.Model):\n name = models.CharField(max_length=60)\n place = models.ForeignKey(Place, blank=True, null=True)\n\n def latest_file(self):\n return DocFile.objects.filter(document=self).order_by('-id')[0]\n\n\nclass DocFile(models.Model):\n document = 
models.ForeignKey(Document)\n version = models.IntegerField()\n file_name = models.CharField(max_length=60)\n comment = models.CharField(max_length=90, blank=True, null=True)\n ctime = models.DateTimeField()\n user = models.ForeignKey(User)\n",
"step-2": "<mask token>\n\n\nclass Ticket(models.Model):\n STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'\n ), ('30', 'Закрыто'), ('40', 'Удалено')\n PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'\n ), ('30', 'Длительное')\n CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',\n 'Печать, принтеры, расходники'), ('20',\n 'Корпоративные системы (SAP,АСУД ..)'), ('30',\n 'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')\n status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)\n priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES\n )\n category = models.CharField('Категория', max_length=3, choices=\n CATEGORY_CHOICES, blank=True, null=True)\n hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,\n decimal_places=1, default=2)\n description = models.TextField('Описание проблемы')\n resume = models.TextField('Отчёт о решении', blank=True, null=True)\n user = models.ForeignKey(User, related_name='tickets')\n admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True\n )\n device = models.ForeignKey(Device, blank=True, null=True)\n ctime = models.DateTimeField(auto_now_add=True)\n closing_time = models.DateTimeField(blank=True, null=True)\n\n def get_short_text(self):\n return self.description[:120]\n\n def hours_from_now(self):\n delta = datetime.datetime.now() - self.ctime\n return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)\n\n def is_new(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '00':\n return True\n else:\n return False\n\n def is_closed(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '30':\n return True\n else:\n return False\n\n def accept_by(self, user):\n self.admin = user\n\n def no(self):\n return '{0:0>5}'.format(self.id)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=60)\n parent = 
models.ForeignKey('self', null=True, blank=True)\n address = models.CharField(max_length=70)\n LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (\n 3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,\n 'Место/комплекс')\n\n def childs(self):\n return Place.objects.filter(parent=self)\n\n def get_level(self):\n res = 0\n try:\n if self.parent != None:\n o = self\n while o.parent != None:\n res += 1\n o = o.parent\n except:\n None\n return res\n\n def level_display(self):\n level = self.get_level()\n for desc in self.LEVEL_DESC:\n if desc[0] == level:\n return desc[1]\n\n def path(self):\n path = []\n o = self\n while o.parent != None:\n path.insert(0, o)\n o = o.parent\n path.insert(0, o)\n return path\n\n def get_absolute_url(self):\n return '/place/' + str(self.id)\n\n def __unicode__(self):\n return self.name\n\n def users(self):\n return User.objects.filter(place=self)\n\n\nclass Document(models.Model):\n name = models.CharField(max_length=60)\n place = models.ForeignKey(Place, blank=True, null=True)\n\n def latest_file(self):\n return DocFile.objects.filter(document=self).order_by('-id')[0]\n\n\nclass DocFile(models.Model):\n document = models.ForeignKey(Document)\n version = models.IntegerField()\n file_name = models.CharField(max_length=60)\n comment = models.CharField(max_length=90, blank=True, null=True)\n ctime = models.DateTimeField()\n user = models.ForeignKey(User)\n",
"step-3": "<mask token>\n\n\nclass User(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Device(models.Model):\n TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (\n '30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (\n '70', 'Маршрутизатор'), ('80', 'Модем')\n type = models.CharField(max_length=3, choices=TYPE_CHOICES)\n inv_no = models.CharField(max_length=40)\n ip = models.IPAddressField(blank=True, null=True)\n model = models.CharField(max_length=60, blank=True, null=True)\n mac = custom_fields.MACAddressField(blank=True, null=True)\n info = models.TextField(blank=True, null=True)\n place = models.ForeignKey('Place')\n hostname = models.CharField(blank=True, null=True, max_length=40)\n\n def type_display(self):\n for desc in self.TYPE_CHOICES:\n if desc[0] == self.type:\n return desc[1]\n\n def get_absolute_url(self):\n return '/place/' + str(self.place.id)\n\n\nclass Ticket(models.Model):\n STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'\n ), ('30', 'Закрыто'), ('40', 'Удалено')\n PRIO_CHOICES = ('00', 'Крайне срочно'), ('10', 'Срочно'), ('20', 'Обычно'\n ), ('30', 'Длительное')\n CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',\n 'Печать, принтеры, расходники'), ('20',\n 'Корпоративные системы (SAP,АСУД ..)'), ('30',\n 'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')\n status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)\n priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES\n )\n category = models.CharField('Категория', max_length=3, choices=\n CATEGORY_CHOICES, blank=True, null=True)\n hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,\n decimal_places=1, default=2)\n description = models.TextField('Описание проблемы')\n resume = 
models.TextField('Отчёт о решении', blank=True, null=True)\n user = models.ForeignKey(User, related_name='tickets')\n admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True\n )\n device = models.ForeignKey(Device, blank=True, null=True)\n ctime = models.DateTimeField(auto_now_add=True)\n closing_time = models.DateTimeField(blank=True, null=True)\n\n def get_short_text(self):\n return self.description[:120]\n\n def hours_from_now(self):\n delta = datetime.datetime.now() - self.ctime\n return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)\n\n def is_new(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '00':\n return True\n else:\n return False\n\n def is_closed(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '30':\n return True\n else:\n return False\n\n def accept_by(self, user):\n self.admin = user\n\n def no(self):\n return '{0:0>5}'.format(self.id)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=60)\n parent = models.ForeignKey('self', null=True, blank=True)\n address = models.CharField(max_length=70)\n LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (\n 3, 'Здание'), (4, 'Этаж'), (5, 'Кабинет/помещение'), (6,\n 'Место/комплекс')\n\n def childs(self):\n return Place.objects.filter(parent=self)\n\n def get_level(self):\n res = 0\n try:\n if self.parent != None:\n o = self\n while o.parent != None:\n res += 1\n o = o.parent\n except:\n None\n return res\n\n def level_display(self):\n level = self.get_level()\n for desc in self.LEVEL_DESC:\n if desc[0] == level:\n return desc[1]\n\n def path(self):\n path = []\n o = self\n while o.parent != None:\n path.insert(0, o)\n o = o.parent\n path.insert(0, o)\n return path\n\n def get_absolute_url(self):\n return '/place/' + str(self.id)\n\n def __unicode__(self):\n return self.name\n\n def users(self):\n return User.objects.filter(place=self)\n\n\nclass Document(models.Model):\n name = 
models.CharField(max_length=60)\n place = models.ForeignKey(Place, blank=True, null=True)\n\n def latest_file(self):\n return DocFile.objects.filter(document=self).order_by('-id')[0]\n\n\nclass DocFile(models.Model):\n document = models.ForeignKey(Document)\n version = models.IntegerField()\n file_name = models.CharField(max_length=60)\n comment = models.CharField(max_length=90, blank=True, null=True)\n ctime = models.DateTimeField()\n user = models.ForeignKey(User)\n",
"step-4": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass User(models.Model):\n name = models.CharField('Имя', max_length=60)\n email = models.EmailField(blank=True, null=True)\n phone = models.CharField('Внутр. телефон', max_length=30, blank=True,\n null=True)\n mobile = models.CharField('Корп. мобильный', max_length=30, blank=True,\n null=True)\n city_phone = models.CharField('Городской телефон', max_length=30, blank\n =True, null=True)\n sat_phone = models.CharField('Спутниковый телефон', max_length=30,\n blank=True, null=True)\n personal_phone = models.CharField('Личный телефон', max_length=30,\n blank=True, null=True)\n admin = models.BooleanField(default=False)\n login = models.CharField(max_length=16, blank=True, null=True)\n password = models.CharField(max_length=32, blank=True, null=True)\n place = models.ForeignKey('Place', blank=True, null=True)\n\n\nclass Device(models.Model):\n TYPE_CHOICES = ('00', 'Компьютер'), ('10', 'Монитор'), ('20', 'Принтер'), (\n '30', 'МФУ'), ('40', 'Плоттер'), ('50', 'Сканер'), ('60', 'Сервер'), (\n '70', 'Маршрутизатор'), ('80', 'Модем')\n type = models.CharField(max_length=3, choices=TYPE_CHOICES)\n inv_no = models.CharField(max_length=40)\n ip = models.IPAddressField(blank=True, null=True)\n model = models.CharField(max_length=60, blank=True, null=True)\n mac = custom_fields.MACAddressField(blank=True, null=True)\n info = models.TextField(blank=True, null=True)\n place = models.ForeignKey('Place')\n hostname = models.CharField(blank=True, null=True, max_length=40)\n\n def type_display(self):\n for desc in self.TYPE_CHOICES:\n if desc[0] == self.type:\n return desc[1]\n\n def get_absolute_url(self):\n return '/place/' + str(self.place.id)\n\n\nclass Ticket(models.Model):\n STATUS_CHOICES = ('00', 'Новое'), ('10', 'Принято'), ('20', 'Ожидаем ответ'\n ), ('30', 'Закрыто'), ('40', 'Удалено')\n PRIO_CHOICES = ('00', 'Крайне 
срочно'), ('10', 'Срочно'), ('20', 'Обычно'\n ), ('30', 'Длительное')\n CATEGORY_CHOICES = ('00', 'Компьютеры, локальный софт, железо'), ('10',\n 'Печать, принтеры, расходники'), ('20',\n 'Корпоративные системы (SAP,АСУД ..)'), ('30',\n 'Сетевые сервисы и оборуд., Серверы'), ('40', 'СКС (провода, розетки)')\n status = models.CharField('Статус', max_length=3, choices=STATUS_CHOICES)\n priority = models.CharField('Приоритет', max_length=3, choices=PRIO_CHOICES\n )\n category = models.CharField('Категория', max_length=3, choices=\n CATEGORY_CHOICES, blank=True, null=True)\n hours_limit = models.DecimalField('Лимит времени, ч.', max_digits=4,\n decimal_places=1, default=2)\n description = models.TextField('Описание проблемы')\n resume = models.TextField('Отчёт о решении', blank=True, null=True)\n user = models.ForeignKey(User, related_name='tickets')\n admin = models.ForeignKey(User, related_name='tasks', blank=True, null=True\n )\n device = models.ForeignKey(Device, blank=True, null=True)\n ctime = models.DateTimeField(auto_now_add=True)\n closing_time = models.DateTimeField(blank=True, null=True)\n\n def get_short_text(self):\n return self.description[:120]\n\n def hours_from_now(self):\n delta = datetime.datetime.now() - self.ctime\n return round(delta.days * 24.0 + delta.seconds / 3600.0, 1)\n\n def is_new(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '00':\n return True\n else:\n return False\n\n def is_closed(self, *args):\n value = self.status\n if args:\n value = args[0]\n if value == '30':\n return True\n else:\n return False\n\n def accept_by(self, user):\n self.admin = user\n\n def no(self):\n return '{0:0>5}'.format(self.id)\n\n\nclass Place(models.Model):\n name = models.CharField(max_length=60)\n parent = models.ForeignKey('self', null=True, blank=True)\n address = models.CharField(max_length=70)\n LEVEL_DESC = (1, 'Населённый пункт'), (2, 'Территория, группа зданий'), (\n 3, 'Здание'), (4, 'Этаж'), (5, 
'Кабинет/помещение'), (6,\n 'Место/комплекс')\n\n def childs(self):\n return Place.objects.filter(parent=self)\n\n def get_level(self):\n res = 0\n try:\n if self.parent != None:\n o = self\n while o.parent != None:\n res += 1\n o = o.parent\n except:\n None\n return res\n\n def level_display(self):\n level = self.get_level()\n for desc in self.LEVEL_DESC:\n if desc[0] == level:\n return desc[1]\n\n def path(self):\n path = []\n o = self\n while o.parent != None:\n path.insert(0, o)\n o = o.parent\n path.insert(0, o)\n return path\n\n def get_absolute_url(self):\n return '/place/' + str(self.id)\n\n def __unicode__(self):\n return self.name\n\n def users(self):\n return User.objects.filter(place=self)\n\n\nclass Document(models.Model):\n name = models.CharField(max_length=60)\n place = models.ForeignKey(Place, blank=True, null=True)\n\n def latest_file(self):\n return DocFile.objects.filter(document=self).order_by('-id')[0]\n\n\nclass DocFile(models.Model):\n document = models.ForeignKey(Document)\n version = models.IntegerField()\n file_name = models.CharField(max_length=60)\n comment = models.CharField(max_length=90, blank=True, null=True)\n ctime = models.DateTimeField()\n user = models.ForeignKey(User)\n",
"step-5": "# -*- coding: utf8 -*-\nfrom django.db import models\nimport custom_fields\nimport datetime\n#import mptt\n\n# Create your models here.\nclass Message(models.Model):\n user = models.ForeignKey('User')\n time = models.DateTimeField(auto_now=True,auto_now_add=True)\n text = models.TextField()\n #true если это ответ поддержки\n reply = models.BooleanField(default=False)\n ticket = models.ForeignKey('Ticket')\n ip = models.IPAddressField(blank=True,null=True)\n \n\nclass User(models.Model):\n name = models.CharField(\"Имя\",max_length=60)\n email = models.EmailField(blank=True,null=True)\n phone = models.CharField(\"Внутр. телефон\",max_length=30,blank=True,null=True)\n mobile = models.CharField(\"Корп. мобильный\",max_length=30,blank=True,null=True)\n city_phone = models.CharField(\"Городской телефон\",max_length=30,blank=True,null=True)\n sat_phone = models.CharField(\"Спутниковый телефон\",max_length=30,blank=True,null=True)\n personal_phone = models.CharField(\"Личный телефон\",max_length=30,blank=True,null=True)\n admin = models.BooleanField(default=False)\n login = models.CharField(max_length=16,blank=True,null=True)\n password = models.CharField(max_length=32,blank=True,null=True)\n place = models.ForeignKey('Place',blank=True,null=True)\n\nclass Device(models.Model):\n TYPE_CHOICES=(\n ('00','Компьютер'),\n ('10','Монитор'),\n ('20','Принтер'),\n ('30','МФУ'),\n ('40','Плоттер'),\n ('50','Сканер'),\n ('60','Сервер'),\n ('70','Маршрутизатор'),\n ('80','Модем'),\n )\n type=models.CharField(max_length=3,choices=TYPE_CHOICES)\n inv_no=models.CharField(max_length=40)\n ip=models.IPAddressField(blank=True,null=True)\n model=models.CharField(max_length=60,blank=True,null=True)\n mac=custom_fields.MACAddressField(blank=True,null=True)\n info=models.TextField(blank=True,null=True)\n place = models.ForeignKey('Place')\n hostname=models.CharField(blank=True,null=True,max_length=40)\n def type_display(self): \n for desc in self.TYPE_CHOICES:\n if 
desc[0]==self.type:\n return desc[1]\n def get_absolute_url(self):\n return \"/place/\"+str(self.place.id)\n\nclass Ticket(models.Model):\n #NEW,OPEN,CLOSED,DELETED\n STATUS_CHOICES=(\n ('00','Новое'),\n ('10','Принято'),\n ('20','Ожидаем ответ'),\n ('30','Закрыто'),\n ('40','Удалено'), \n )\n PRIO_CHOICES=(\n ('00','Крайне срочно'),\n ('10','Срочно'),\n ('20','Обычно'),\n ('30','Длительное')\n )\n\n CATEGORY_CHOICES=(\n ('00','Компьютеры, локальный софт, железо'),\n ('10','Печать, принтеры, расходники'),\n ('20','Корпоративные системы (SAP,АСУД ..)'),\n ('30','Сетевые сервисы и оборуд., Серверы'),\n ('40','СКС (провода, розетки)'),\n \n )\n\n status = models.CharField(\"Статус\",max_length=3, choices=STATUS_CHOICES)\n priority = models.CharField(\"Приоритет\",max_length=3, choices=PRIO_CHOICES)\n category = models.CharField(\"Категория\",max_length=3, choices=CATEGORY_CHOICES,blank=True,null=True)\n hours_limit=models.DecimalField(\"Лимит времени, ч.\",max_digits=4, decimal_places=1,default=2)\n #Описание проблемы. при создании тикета - присваиваем текст 1го обращения\n #В процессе выполнения заявки можем менять\n description = models.TextField(\"Описание проблемы\")\n #Описание решения по закрытии заявки\n resume = models.TextField(\"Отчёт о решении\",blank=True,null=True)\n user = models.ForeignKey(User,related_name=\"tickets\")\n admin = models.ForeignKey(User,related_name=\"tasks\",blank=True,null=True)\n device = models.ForeignKey(Device,blank=True,null=True) \n #Время создания. 
\n ctime = models.DateTimeField(auto_now_add = True)\n #Время закрытия\n closing_time = models.DateTimeField(blank=True,null=True)\n\n def get_short_text(self):\n return self.description[:120]\n \n def hours_from_now(self):\n delta=datetime.datetime.now()-self.ctime\n return round(delta.days*24.0+delta.seconds/3600.0,1)\n\n def is_new(self,*args):\n value=self.status\n if args:\n value=args[0]\n if value=='00':\n return True\n else:\n return False\n\n def is_closed(self,*args):\n value=self.status\n if args:\n value=args[0]\n if value=='30':\n return True\n else:\n return False\n \n def accept_by(self,user):\n self.admin=user\n \n def no(self):\n return '{0:0>5}'.format(self.id)\n \nclass Place(models.Model):\n name = models.CharField(max_length=60)\n parent = models.ForeignKey('self',null=True, blank=True )\n address = models.CharField(max_length=70)\n LEVEL_DESC=(\n (1,\"Населённый пункт\"),\n (2,\"Территория, группа зданий\"),\n (3,\"Здание\"),\n (4,\"Этаж\"),\n (5,\"Кабинет/помещение\"),\n (6,\"Место/комплекс\"),\n )\n def childs(self):\n return Place.objects.filter(parent=self)\n \n def get_level(self):\n res=0\n try:\n if self.parent!=None:\n o=self\n while (o.parent !=None):\n res+=1\n o=o.parent\n except:\n None\n return res\n \n def level_display(self):\n level=self.get_level()\n for desc in self.LEVEL_DESC:\n if desc[0]==level:\n return desc[1]\n \n def path(self): \n path=[]\n o=self\n while (o.parent != None):\n path.insert(0,o)\n o=o.parent\n path.insert(0,o)\n return path\n def get_absolute_url(self):\n return '/place/'+str(self.id)\n def __unicode__(self):\n return self.name\n \n def users(self):\n return User.objects.filter(place=self)\n\n#mptt.register(Place)\n\nclass Document(models.Model):\n name=models.CharField(max_length=60)\n place=models.ForeignKey(Place,blank=True,null=True)\n def latest_file(self):\n return DocFile.objects.filter(document=self).order_by('-id')[0]\n \nclass DocFile(models.Model):\n document=models.ForeignKey(Document)\n 
version=models.IntegerField()\n file_name=models.CharField(max_length=60)\n comment=models.CharField(max_length=90,blank=True,null=True)\n ctime = models.DateTimeField()\n user = models.ForeignKey(User)\n \n",
"step-ids": [
20,
22,
27,
29,
32
]
}
|
[
20,
22,
27,
29,
32
] |
# Read integers from stdin until a terminating 0 and print how many times
# the largest value seen occurred.  Note: the terminating 0 itself goes
# through the comparison, so an input consisting of just "0" prints 1.
count = 0
maximum = -1
value = -1
while value != 0:
    value = int(input())
    if value > maximum:
        # New maximum: restart the occurrence counter.
        maximum = value
        count = 1
    elif value == maximum:
        count += 1
print(count)
|
normal
|
{
"blob_id": "0e1ea8c7fba90c1b5d18eaa399b91f237d4defee",
"index": 2568,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile m != 0:\n m = int(input())\n if m > maximum:\n maximum = m\n count = 1\n elif m == maximum:\n count += 1\nprint(count)\n",
"step-3": "count = 0\nmaximum = -1\nm = -1\nwhile m != 0:\n m = int(input())\n if m > maximum:\n maximum = m\n count = 1\n elif m == maximum:\n count += 1\nprint(count)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def _load_credentials(creds_file=None):
    """Load Google API credentials, caching the OAuth token on disk.

    Looks for ``credentials.json`` in the current directory, then in the
    user's home directory.  The authorized token is pickled under
    CACHE_DIR so the interactive browser flow only has to run once.

    Returns a credentials object to be used by the Google API client.
    Raises SystemExit when no credentials file can be found.
    """
    creds = None
    if not creds_file:
        creds_file = 'credentials.json'
    if not os.path.exists(creds_file):
        creds_file = os.path.join(expanduser('~'), 'credentials.json')
    if not os.path.exists(creds_file):
        raise SystemExit(
            'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '
            + expanduser('~'))
    # FIX: makedirs(..., exist_ok=True) also creates missing parent dirs
    # (e.g. ~/.cache) and is race-free; the old exists()+mkdir() pair
    # failed with FileNotFoundError when the parent did not exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: renew without user interaction.
            creds.refresh(Request())
        else:
            # No usable token yet: run the interactive OAuth browser flow.
            flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new or refreshed token for next time.
        with open(pickle_filename, 'wb') as token:
            pickle.dump(creds, token)
    return creds
def gc_get_events(num_events=50):
    """Return up to ``num_events`` upcoming primary-calendar events.

    The query window runs from "now" (UTC) until midnight at the start
    of the day after tomorrow (``days=2``), ordered by start time.
    """
    creds = _load_credentials()
    service = build('calendar', 'v3', credentials=creds)
    window_start = datetime.utcnow().isoformat() + 'Z'
    window_end = datetime.combine(date.today() + timedelta(days=2),
                                  datetime.min.time()).isoformat() + 'Z'
    query = service.events().list(calendarId='primary',
                                  timeMin=window_start,
                                  timeMax=window_end,
                                  maxResults=num_events,
                                  singleEvents=True,
                                  orderBy='startTime')
    return query.execute().get('items', [])
<|reserved_special_token_0|>
def update_gcal_buffer(buffer, events):
    """Render the event list into the weechat buffer, grouped by day."""
    weechat.buffer_clear(buffer)
    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')
    # Group events by calendar day, preserving the feed's order.
    dates = {}
    for event in events:
        when = datetime_parse(event['date'])
        day_key = when.strftime('%a %Y-%m-%d')
        dates.setdefault(day_key, []).append({
            'time': when.strftime('%H:%M'),
            'summary': event['summary'],
        })
    for day_key in dates.keys():
        weechat.prnt(buffer, day_key)
        for entry in dates[day_key]:
            weechat.prnt(buffer, '{} {}'.format(entry['time'], entry['summary']))
def get_calendar(*args):
    """Collect upcoming events and return them as a JSON string.

    Runs inside a weechat hook_process child, so the result must be a
    plain string; errors are serialized rather than raised.

    Returns a JSON array of {'date', 'summary'} objects on success, or a
    JSON-encoded error message string on failure.
    """
    result = []
    try:
        events = gc_get_events()
        for event in events:
            # All-day events carry 'date' instead of 'dateTime'.
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({'date': start, 'summary': event['summary']})
    except Exception as err:
        # BUG FIX: an Exception instance is not JSON serializable, so
        # ``json.dumps(err)`` itself raised TypeError and the error path
        # never produced output.  Serialize the message text instead.
        result = str(err)
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render events and, when invoked from the
    timer, emit highlight notifications for imminent events."""
    result = json.loads(out)
    buffer = buffer_get()
    update_gcal_buffer(buffer, result)
    # Only the periodic timer produces notifications; the manual /gcal
    # command just refreshes the buffer.
    if data != CALLED_FROM_TIMER:
        return weechat.WEECHAT_RC_OK
    for event in result:
        starts_at = datetime_parse(event['date'])
        now = datetime.now(tz=starts_at.tzinfo)
        minutes_remaining = math.ceil((starts_at - now).total_seconds() / 60)
        if minutes_remaining in NOTIFICATION_THRESHOLDS:
            message = '[{}m] {}'.format(minutes_remaining, event['summary'])
            weechat.prnt_date_tags(buffer, 0, 'notify_highlight', message)
    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """/gcal command handler.

    'init' is accepted as a no-op — presumably the buffer creation done
    by buffer_get() is the initialization (TODO confirm); any other
    argument triggers an asynchronous refresh of the event list.
    """
    buffer = buffer_get()
    if args != 'init':
        weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                             'get_calendar_callback', CALLED_FROM_CMD)
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    # Timer callback: fetch events in a background hook_process child so
    # the weechat main loop is not blocked by network I/O.
    weechat.hook_process('func:get_calendar', TIMEOUT_MS,
        'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _load_credentials(creds_file=None):
    """Load Google API credentials, caching the OAuth token on disk.

    Looks for ``credentials.json`` in the current directory, then in the
    user's home directory.  The authorized token is pickled under
    CACHE_DIR so the interactive browser flow only has to run once.

    Returns a credentials object to be used by the Google API client.
    Raises SystemExit when no credentials file can be found.
    """
    creds = None
    if not creds_file:
        creds_file = 'credentials.json'
    if not os.path.exists(creds_file):
        creds_file = os.path.join(expanduser('~'), 'credentials.json')
    if not os.path.exists(creds_file):
        raise SystemExit(
            'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '
            + expanduser('~'))
    # FIX: makedirs(..., exist_ok=True) also creates missing parent dirs
    # (e.g. ~/.cache) and is race-free; the old exists()+mkdir() pair
    # failed with FileNotFoundError when the parent did not exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: renew without user interaction.
            creds.refresh(Request())
        else:
            # No usable token yet: run the interactive OAuth browser flow.
            flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new or refreshed token for next time.
        with open(pickle_filename, 'wb') as token:
            pickle.dump(creds, token)
    return creds
def gc_get_events(num_events=50):
    """Return up to ``num_events`` upcoming primary-calendar events.

    The query window runs from "now" (UTC) until midnight at the start
    of the day after tomorrow (``days=2``), ordered by start time.
    """
    creds = _load_credentials()
    service = build('calendar', 'v3', credentials=creds)
    window_start = datetime.utcnow().isoformat() + 'Z'
    window_end = datetime.combine(date.today() + timedelta(days=2),
                                  datetime.min.time()).isoformat() + 'Z'
    query = service.events().list(calendarId='primary',
                                  timeMin=window_start,
                                  timeMax=window_end,
                                  maxResults=num_events,
                                  singleEvents=True,
                                  orderBy='startTime')
    return query.execute().get('items', [])
def buffer_get():
    """Find the script's output buffer, creating and configuring it on
    first use.  Returns a buffer pointer."""
    buffer = weechat.buffer_search('python', SCRIPT_NAME)
    if buffer:
        return buffer
    # First call: create the buffer and apply one-time settings.
    buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
    weechat.buffer_set(buffer, 'time_for_each_line', '0')
    weechat.buffer_set(buffer, 'nicklist', '0')
    weechat.buffer_set(buffer, 'title', 'Google Calendar')
    weechat.buffer_set(buffer, 'localvar_set_no_log', '1')
    return buffer
def buffer_input(data, buffer, input_data):
    """Input callback for the weechat-gcal buffer.

    Called when non-command text is entered in the buffer.  The text is
    deliberately ignored; the callback exists only so weechat does not
    report an error on input.
    """
    return weechat.WEECHAT_RC_OK
def update_gcal_buffer(buffer, events):
    """Render the event list into the weechat buffer, grouped by day."""
    weechat.buffer_clear(buffer)
    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')
    # Group events by calendar day, preserving the feed's order.
    dates = {}
    for event in events:
        when = datetime_parse(event['date'])
        day_key = when.strftime('%a %Y-%m-%d')
        dates.setdefault(day_key, []).append({
            'time': when.strftime('%H:%M'),
            'summary': event['summary'],
        })
    for day_key in dates.keys():
        weechat.prnt(buffer, day_key)
        for entry in dates[day_key]:
            weechat.prnt(buffer, '{} {}'.format(entry['time'], entry['summary']))
def get_calendar(*args):
    """Collect upcoming events and return them as a JSON string.

    Runs inside a weechat hook_process child, so the result must be a
    plain string; errors are serialized rather than raised.

    Returns a JSON array of {'date', 'summary'} objects on success, or a
    JSON-encoded error message string on failure.
    """
    result = []
    try:
        events = gc_get_events()
        for event in events:
            # All-day events carry 'date' instead of 'dateTime'.
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({'date': start, 'summary': event['summary']})
    except Exception as err:
        # BUG FIX: an Exception instance is not JSON serializable, so
        # ``json.dumps(err)`` itself raised TypeError and the error path
        # never produced output.  Serialize the message text instead.
        result = str(err)
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render events and, when invoked from the
    timer, emit highlight notifications for imminent events."""
    result = json.loads(out)
    buffer = buffer_get()
    update_gcal_buffer(buffer, result)
    # Only the periodic timer produces notifications; the manual /gcal
    # command just refreshes the buffer.
    if data != CALLED_FROM_TIMER:
        return weechat.WEECHAT_RC_OK
    for event in result:
        starts_at = datetime_parse(event['date'])
        now = datetime.now(tz=starts_at.tzinfo)
        minutes_remaining = math.ceil((starts_at - now).total_seconds() / 60)
        if minutes_remaining in NOTIFICATION_THRESHOLDS:
            message = '[{}m] {}'.format(minutes_remaining, event['summary'])
            weechat.prnt_date_tags(buffer, 0, 'notify_highlight', message)
    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """/gcal command handler.

    'init' is accepted as a no-op — presumably the buffer creation done
    by buffer_get() is the initialization (TODO confirm); any other
    argument triggers an asynchronous refresh of the event list.
    """
    buffer = buffer_get()
    if args != 'init':
        weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                             'get_calendar_callback', CALLED_FROM_CMD)
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    # Timer callback: fetch events in a background hook_process child so
    # the weechat main loop is not blocked by network I/O.
    weechat.hook_process('func:get_calendar', TIMEOUT_MS,
        'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
# Register the script with weechat and wire up the entry points.
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
    SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)
# /gcal command: manual refresh, plus an 'init' sub-command.
weechat.hook_command('gcal',
    'Displays events for today and tomorrow in a new buffer.', '[init]',
    ' || init - Initializes the items needed for this plugin to work.', '',
    'gcal_command', '')
# Poll every 60000 ms, aligned to the minute (align_second=60),
# unlimited calls (max_calls=0).
weechat.hook_timer(60000, 60, 0, 'script_main', '')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Script metadata passed to weechat.register().
SCRIPT_NAME = 'weechat-gcal'
SCRIPT_AUTHOR = 'Dave Mulford'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL2'
SCRIPT_DESC = (
    'A Google Calendar integration script that provides notifications of upcoming events.'
    )
SCRIPT_SHUTDOWN_FN = ''
SCRIPT_CHARSET = ''
# Timeout for the hook_process child that fetches events, in milliseconds.
TIMEOUT_MS = 3000
# 'data' markers telling the callback who triggered the fetch.
CALLED_FROM_CMD = '100'
CALLED_FROM_TIMER = '200'
# Minutes-before-start values at which a highlight notification is emitted.
NOTIFICATION_THRESHOLDS = [5, 15]
# Read-only calendar access is sufficient for this script.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
# Where the pickled OAuth token is cached.
CACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')
def _load_credentials(creds_file=None):
    """Load Google API credentials, caching the OAuth token on disk.

    Looks for ``credentials.json`` in the current directory, then in the
    user's home directory.  The authorized token is pickled under
    CACHE_DIR so the interactive browser flow only has to run once.

    Returns a credentials object to be used by the Google API client.
    Raises SystemExit when no credentials file can be found.
    """
    creds = None
    if not creds_file:
        creds_file = 'credentials.json'
    if not os.path.exists(creds_file):
        creds_file = os.path.join(expanduser('~'), 'credentials.json')
    if not os.path.exists(creds_file):
        raise SystemExit(
            'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '
            + expanduser('~'))
    # FIX: makedirs(..., exist_ok=True) also creates missing parent dirs
    # (e.g. ~/.cache) and is race-free; the old exists()+mkdir() pair
    # failed with FileNotFoundError when the parent did not exist.
    os.makedirs(CACHE_DIR, exist_ok=True)
    pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')
    if os.path.exists(pickle_filename):
        with open(pickle_filename, 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: renew without user interaction.
            creds.refresh(Request())
        else:
            # No usable token yet: run the interactive OAuth browser flow.
            flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new or refreshed token for next time.
        with open(pickle_filename, 'wb') as token:
            pickle.dump(creds, token)
    return creds
def gc_get_events(num_events=50):
    """Fetch upcoming events from the user's primary Google Calendar.

    The query window runs from the current instant (UTC) up to midnight at
    the start of the day after tomorrow, so both "today" and "tomorrow"
    are covered.

    Args:
        num_events: Maximum number of events to request.

    Returns:
        A list of Google Calendar event resource dicts (possibly empty).
    """
    service = build('calendar', 'v3', credentials=_load_credentials())
    # RFC3339 timestamps; the trailing 'Z' marks them as UTC.
    window_start = datetime.utcnow().isoformat() + 'Z'
    day_after_tomorrow = date.today() + timedelta(days=2)
    window_end = datetime.combine(day_after_tomorrow, datetime.min.time()
        ).isoformat() + 'Z'
    request = service.events().list(
        calendarId='primary',
        timeMin=window_start,
        timeMax=window_end,
        maxResults=num_events,
        singleEvents=True,
        orderBy='startTime',
    )
    return request.execute().get('items', [])
def buffer_get():
    """Return a pointer to the script's output buffer, creating it on demand."""
    buf = weechat.buffer_search('python', SCRIPT_NAME)
    if buf:
        return buf
    buf = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
    # Configure the freshly created buffer: no per-line timestamps, no
    # nicklist, a human-readable title, and logging disabled.
    for prop, value in (
            ('time_for_each_line', '0'),
            ('nicklist', '0'),
            ('title', 'Google Calendar'),
            ('localvar_set_no_log', '1')):
        weechat.buffer_set(buf, prop, value)
    return buf
def buffer_input(data, buffer, input_data):
    """No-op callback for text typed into the weechat-gcal buffer.

    Registered as the buffer's input callback purely so free text entered
    in the buffer is accepted silently instead of producing errors; it
    performs no action.
    """
    return weechat.WEECHAT_RC_OK
def update_gcal_buffer(buffer, events):
    """Re-render the calendar buffer from a list of event dicts.

    Args:
        buffer: Weechat buffer pointer to print into.
        events: List of ``{'date': iso-string, 'summary': str}`` dicts.
    """
    weechat.buffer_clear(buffer)
    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')
        return  # nothing to group or print
    # Group events by calendar day; insertion order preserves the
    # already-sorted order of the incoming list.
    dates = {}
    for event in events:
        dt = datetime_parse(event['date'])
        datestr = dt.strftime('%a %Y-%m-%d')
        dates.setdefault(datestr, []).append(
            {'time': dt.strftime('%H:%M'), 'summary': event['summary']})
    for datestr, day_events in dates.items():
        weechat.prnt(buffer, datestr)
        for event in day_events:
            weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))
def get_calendar(*args):
    """Child-process entry point: fetch events and serialize them as JSON.

    Runs inside a weechat ``hook_process`` fork, so the only channel back
    to the plugin is the returned string.

    Returns:
        JSON text: a list of ``{'date', 'summary'}`` dicts on success, or
        a string describing the error on failure.
    """
    result = []
    try:
        for event in gc_get_events():
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({'date': start, 'summary': event['summary']})
    except Exception as err:
        # An Exception instance is not JSON-serializable; json.dumps(err)
        # would itself raise TypeError. Ship the message text instead.
        result = str(err)
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render events and emit due notifications."""
    events = json.loads(out)
    buffer = buffer_get()
    update_gcal_buffer(buffer, events)
    # Only the timer path produces highlight notifications; the manual
    # /gcal command merely refreshes the buffer.
    if data == CALLED_FROM_TIMER:
        for event in events:
            starts_at = datetime_parse(event['date'])
            delta = starts_at - datetime.now(tz=starts_at.tzinfo)
            minutes_remaining = math.ceil(delta.total_seconds() / 60)
            if minutes_remaining in NOTIFICATION_THRESHOLDS:
                weechat.prnt_date_tags(
                    buffer, 0, 'notify_highlight',
                    '[{}m] {}'.format(minutes_remaining, event['summary']))
    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """Handler for the /gcal command.

    The 'init' argument is accepted but not yet implemented; any other
    invocation refreshes the calendar buffer.
    """
    buffer = buffer_get()
    if args != 'init':
        # Fetch in a child process so weechat's single thread is not blocked.
        weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                             'get_calendar_callback', CALLED_FROM_CMD)
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    """Timer callback: refresh events once per minute.

    Weechat is single-threaded, so the (potentially slow) Google Calendar
    round-trip runs in a separate process via hook_process.
    """
    weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                         'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
# Register the script with weechat; this must precede any other weechat API use.
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
    SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)
# /gcal command: shows today's and tomorrow's events in a dedicated buffer.
weechat.hook_command('gcal',
    'Displays events for today and tomorrow in a new buffer.', '[init]',
    ' || init - Initializes the items needed for this plugin to work.', '',
    'gcal_command', '')
# Poll every 60s (aligned to the minute) for imminent-event notifications.
weechat.hook_timer(60000, 60, 0, 'script_main', '')
<|reserved_special_token_1|>
from __future__ import print_function
import weechat
import sys
import pickle
import json
import math
import os.path
from datetime import datetime
from datetime import date
from datetime import timedelta
from dateutil.parser import parse as datetime_parse
from os.path import expanduser
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# Script registration metadata consumed by weechat.register().
SCRIPT_NAME = 'weechat-gcal'
SCRIPT_AUTHOR = 'Dave Mulford'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL2'
SCRIPT_DESC = (
    'A Google Calendar integration script that provides notifications of upcoming events.'
    )
SCRIPT_SHUTDOWN_FN = ''
SCRIPT_CHARSET = ''
# Timeout (milliseconds) for the hook_process child that talks to Google.
TIMEOUT_MS = 3000
# Opaque "data" markers passed to hook_process so the shared callback can
# tell whether it was triggered by the /gcal command or by the timer.
CALLED_FROM_CMD = '100'
CALLED_FROM_TIMER = '200'
# Notify when an event starts in exactly this many minutes.
NOTIFICATION_THRESHOLDS = [5, 15]
# OAuth scopes; if these change, the cached token pickle must be deleted.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
# Directory holding the cached weechat-gcal-token.pickle.
CACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')
def _load_credentials(creds_file=None):
"""Loads the credentials from a credentials.json file or by prompting for authentication.
Returns a credentials object to be used by the Google Sheets API.
"""
creds = None
if not creds_file:
creds_file = 'credentials.json'
if not os.path.exists(creds_file):
creds_file = os.path.join(expanduser('~'), 'credentials.json')
if not os.path.exists(creds_file):
raise SystemExit(
'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '
+ expanduser('~'))
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')
if os.path.exists(pickle_filename):
with open(pickle_filename, 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES
)
creds = flow.run_local_server(port=0)
with open(pickle_filename, 'wb') as token:
pickle.dump(creds, token)
return creds
def gc_get_events(num_events=50):
    """Fetch upcoming events from the user's primary Google Calendar.

    The query window runs from the current instant (UTC) up to midnight at
    the start of the day after tomorrow, so both "today" and "tomorrow"
    are covered.

    Args:
        num_events: Maximum number of events to request.

    Returns:
        A list of Google Calendar event resource dicts (possibly empty).
    """
    service = build('calendar', 'v3', credentials=_load_credentials())
    # RFC3339 timestamps; the trailing 'Z' marks them as UTC.
    window_start = datetime.utcnow().isoformat() + 'Z'
    day_after_tomorrow = date.today() + timedelta(days=2)
    window_end = datetime.combine(day_after_tomorrow, datetime.min.time()
        ).isoformat() + 'Z'
    request = service.events().list(
        calendarId='primary',
        timeMin=window_start,
        timeMax=window_end,
        maxResults=num_events,
        singleEvents=True,
        orderBy='startTime',
    )
    return request.execute().get('items', [])
def buffer_get():
    """Return a pointer to the script's output buffer, creating it on demand."""
    buf = weechat.buffer_search('python', SCRIPT_NAME)
    if buf:
        return buf
    buf = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
    # Configure the freshly created buffer: no per-line timestamps, no
    # nicklist, a human-readable title, and logging disabled.
    for prop, value in (
            ('time_for_each_line', '0'),
            ('nicklist', '0'),
            ('title', 'Google Calendar'),
            ('localvar_set_no_log', '1')):
        weechat.buffer_set(buf, prop, value)
    return buf
def buffer_input(data, buffer, input_data):
    """No-op callback for text typed into the weechat-gcal buffer.

    Registered as the buffer's input callback purely so free text entered
    in the buffer is accepted silently instead of producing errors; it
    performs no action.
    """
    return weechat.WEECHAT_RC_OK
def update_gcal_buffer(buffer, events):
    """Re-render the calendar buffer from a list of event dicts.

    Args:
        buffer: Weechat buffer pointer to print into.
        events: List of ``{'date': iso-string, 'summary': str}`` dicts.
    """
    weechat.buffer_clear(buffer)
    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')
        return  # nothing to group or print
    # Group events by calendar day; insertion order preserves the
    # already-sorted order of the incoming list.
    dates = {}
    for event in events:
        dt = datetime_parse(event['date'])
        datestr = dt.strftime('%a %Y-%m-%d')
        dates.setdefault(datestr, []).append(
            {'time': dt.strftime('%H:%M'), 'summary': event['summary']})
    for datestr, day_events in dates.items():
        weechat.prnt(buffer, datestr)
        for event in day_events:
            weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))
def get_calendar(*args):
    """Child-process entry point: fetch events and serialize them as JSON.

    Runs inside a weechat ``hook_process`` fork, so the only channel back
    to the plugin is the returned string.

    Returns:
        JSON text: a list of ``{'date', 'summary'}`` dicts on success, or
        a string describing the error on failure.
    """
    result = []
    try:
        for event in gc_get_events():
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({'date': start, 'summary': event['summary']})
    except Exception as err:
        # An Exception instance is not JSON-serializable; json.dumps(err)
        # would itself raise TypeError. Ship the message text instead.
        result = str(err)
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render events and emit due notifications."""
    events = json.loads(out)
    buffer = buffer_get()
    update_gcal_buffer(buffer, events)
    # Only the timer path produces highlight notifications; the manual
    # /gcal command merely refreshes the buffer.
    if data == CALLED_FROM_TIMER:
        for event in events:
            starts_at = datetime_parse(event['date'])
            delta = starts_at - datetime.now(tz=starts_at.tzinfo)
            minutes_remaining = math.ceil(delta.total_seconds() / 60)
            if minutes_remaining in NOTIFICATION_THRESHOLDS:
                weechat.prnt_date_tags(
                    buffer, 0, 'notify_highlight',
                    '[{}m] {}'.format(minutes_remaining, event['summary']))
    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """Handler for the /gcal command.

    The 'init' argument is accepted but not yet implemented; any other
    invocation refreshes the calendar buffer.
    """
    buffer = buffer_get()
    if args != 'init':
        # Fetch in a child process so weechat's single thread is not blocked.
        weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                             'get_calendar_callback', CALLED_FROM_CMD)
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    """Timer callback: refresh events once per minute.

    Weechat is single-threaded, so the (potentially slow) Google Calendar
    round-trip runs in a separate process via hook_process.
    """
    weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                         'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
# Register the script with weechat; this must precede any other weechat API use.
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
    SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)
# /gcal command: shows today's and tomorrow's events in a dedicated buffer.
weechat.hook_command('gcal',
    'Displays events for today and tomorrow in a new buffer.', '[init]',
    ' || init - Initializes the items needed for this plugin to work.', '',
    'gcal_command', '')
# Poll every 60s (aligned to the minute) for imminent-event notifications.
weechat.hook_timer(60000, 60, 0, 'script_main', '')
<|reserved_special_token_1|>
#!/usr/bin/env python
from __future__ import print_function
import weechat
import sys
import pickle
import json
import math
import os.path
from datetime import datetime
from datetime import date
from datetime import timedelta
from dateutil.parser import parse as datetime_parse
from os.path import expanduser
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# TODO: Add settings
# minutes_remaining = [5, 10, 15]
# notify_enabled = yes/no
# time_format = '%H:%M' ???
# Script registration metadata consumed by weechat.register().
SCRIPT_NAME = 'weechat-gcal'
SCRIPT_AUTHOR = 'Dave Mulford'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL2'
SCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.'
SCRIPT_SHUTDOWN_FN = ''
SCRIPT_CHARSET = ''
# Timeout (milliseconds) for the hook_process child that talks to Google.
TIMEOUT_MS = 3000
# Opaque "data" markers passed to hook_process so the shared callback can
# tell whether it was triggered by the /gcal command or by the timer.
CALLED_FROM_CMD = '100'
CALLED_FROM_TIMER = '200'
# Notify when an event starts in exactly this many minutes.
NOTIFICATION_THRESHOLDS = [5,15]
# OAuth scopes; if modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
# Where the weechat-gcal-token.pickle file is located.
CACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')
# =============================
# GOOGLE CALENDAR FUNCTIONS
# =============================
def _load_credentials(creds_file=None):
"""Loads the credentials from a credentials.json file or by prompting for authentication.
Returns a credentials object to be used by the Google Sheets API.
"""
creds = None
# Validate the credentials file
if not creds_file:
creds_file = 'credentials.json'
if not os.path.exists(creds_file):
creds_file = os.path.join(expanduser('~'), 'credentials.json')
if not os.path.exists(creds_file):
raise SystemExit('Could not find a credentials.json file. ' \
'Either pass one as argument or make sure credentials.json exists in ' \
'the current directory or ' + expanduser('~'))
# Creates CACHE_DIR if it does not exist
# mode 0x777 (the default) is used because the system's umask value is masked out first
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first time.
if os.path.exists(pickle_filename):
with open(pickle_filename, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(pickle_filename, 'wb') as token:
pickle.dump(creds, token)
return creds
def gc_get_events(num_events=50):
    """Fetch upcoming events from the user's primary Google Calendar.

    The query window runs from the current instant (UTC) up to midnight at
    the start of the day after tomorrow, so both "today" and "tomorrow"
    are covered.

    Args:
        num_events: Maximum number of events to request.

    Returns:
        A list of Google Calendar event resource dicts (possibly empty).
    """
    service = build('calendar', 'v3', credentials=_load_credentials())
    # RFC3339 timestamps; the trailing 'Z' marks them as UTC.
    window_start = datetime.utcnow().isoformat() + 'Z'
    day_after_tomorrow = date.today() + timedelta(days=2)
    window_end = datetime.combine(day_after_tomorrow, datetime.min.time()
        ).isoformat() + 'Z'
    request = service.events().list(
        calendarId='primary',
        timeMin=window_start,
        timeMax=window_end,
        maxResults=num_events,
        singleEvents=True,
        orderBy='startTime',
    )
    return request.execute().get('items', [])
# =============================
# WEECHAT HELPER FUNCTIONS
# =============================
def buffer_get():
    """Return a pointer to the script's output buffer, creating it on demand."""
    buf = weechat.buffer_search('python', SCRIPT_NAME)
    if buf:
        return buf
    buf = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')
    # Configure the freshly created buffer: no per-line timestamps, no
    # nicklist, a human-readable title, and logging disabled.
    for prop, value in (
            ('time_for_each_line', '0'),
            ('nicklist', '0'),
            ('title', 'Google Calendar'),
            ('localvar_set_no_log', '1')):
        weechat.buffer_set(buf, prop, value)
    return buf
def buffer_input(data, buffer, input_data):
    """No-op callback for text typed into the weechat-gcal buffer.

    Registered as the buffer's input callback purely so free text entered
    in the buffer is accepted silently instead of producing errors; it
    performs no action.
    """
    return weechat.WEECHAT_RC_OK
def update_gcal_buffer(buffer, events):
    """Re-render the calendar buffer from a list of event dicts.

    Args:
        buffer: Weechat buffer pointer to print into.
        events: List of ``{'date': iso-string, 'summary': str}`` dicts.
    """
    weechat.buffer_clear(buffer)
    if events == []:
        weechat.prnt(buffer, 'No events for now. YAY!!!')
        return  # nothing to group or print
    # Group events by calendar day; insertion order preserves the
    # already-sorted order of the incoming list.
    dates = {}
    for event in events:
        dt = datetime_parse(event['date'])
        datestr = dt.strftime('%a %Y-%m-%d')
        dates.setdefault(datestr, []).append(
            {'time': dt.strftime('%H:%M'), 'summary': event['summary']})
    for datestr, day_events in dates.items():
        weechat.prnt(buffer, datestr)
        for event in day_events:
            weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))
# =============================
# MAIN SCRIPT FUNCTIONS
# =============================
def get_calendar(*args):
    """Child-process entry point: fetch events and serialize them as JSON.

    Runs inside a weechat ``hook_process`` fork, so the only channel back
    to the plugin is the returned string.

    Returns:
        JSON text: a list of ``{'date', 'summary'}`` dicts on success, or
        a string describing the error on failure.
    """
    result = []
    try:
        for event in gc_get_events():
            start = event['start'].get('dateTime', event['start'].get('date'))
            result.append({'date': start, 'summary': event['summary']})
    except Exception as err:
        # An Exception instance is not JSON-serializable; json.dumps(err)
        # would itself raise TypeError. Ship the message text instead.
        result = str(err)
    return json.dumps(result)
def get_calendar_callback(data, command, return_code, out, err):
    """hook_process callback: render events and emit due notifications."""
    events = json.loads(out)
    buffer = buffer_get()
    update_gcal_buffer(buffer, events)
    # Only the timer path produces highlight notifications; the manual
    # /gcal command merely refreshes the buffer.
    if data == CALLED_FROM_TIMER:
        for event in events:
            starts_at = datetime_parse(event['date'])
            delta = starts_at - datetime.now(tz=starts_at.tzinfo)
            minutes_remaining = math.ceil(delta.total_seconds() / 60)
            # TODO Make the minutes_remaining thresholds configurable.
            if minutes_remaining in NOTIFICATION_THRESHOLDS:
                weechat.prnt_date_tags(
                    buffer, 0, 'notify_highlight',
                    '[{}m] {}'.format(minutes_remaining, event['summary']))
    return weechat.WEECHAT_RC_OK
def gcal_command(data, buffer, args):
    """Handler for the /gcal command.

    The 'init' argument is accepted but not yet implemented; any other
    invocation refreshes the calendar buffer.
    """
    buffer = buffer_get()
    if args != 'init':
        # Fetch in a child process so weechat's single thread is not blocked.
        weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                             'get_calendar_callback', CALLED_FROM_CMD)
    return weechat.WEECHAT_RC_OK
def script_main(data, remaining_calls):
    """Timer callback: fetch calendar events in a separate process.

    Weechat is single-threaded, so the fetch runs in a child process to
    avoid holding other things up if Google Calendar is slow to respond.
    https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture
    """
    weechat.hook_process('func:get_calendar', TIMEOUT_MS,
                         'get_calendar_callback', CALLED_FROM_TIMER)
    return weechat.WEECHAT_RC_OK
# Register the script on /script load.
# This needs to happen first -- no other weechat API call is valid before it.
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \
    SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)
# Setup a command to initialize the Google Calendar authentication and show events in a buffer.
#   /gcal       -> fetch and display events asynchronously (see gcal_command)
#   /gcal init  -> placeholder, not implemented yet
weechat.hook_command(
    'gcal',
    'Displays events for today and tomorrow in a new buffer.',
    '[init]',
    ' || init - Initializes the items needed for this plugin to work.',
    '',
    'gcal_command',
    ''
)
# Check once per minute whether we should notify of imminent events.
# Args: interval 60000 ms, align_second 60 (presumably fires on minute
# boundaries -- confirm against the weechat hook_timer docs), max_calls 0
# (repeat forever), callback name, callback data.
weechat.hook_timer(60000, 60, 0, 'script_main', '')
|
flexible
|
{
"blob_id": "0ed0fb6f9bcc768bb005222c9ae9b454f6d962ec",
"index": 9148,
"step-1": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\n<mask token>\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. 
YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 
'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, 
args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-3": "<mask token>\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. 
This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return 
weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-4": "from __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = (\n 'A Google Calendar integration script that provides notifications of upcoming events.'\n )\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\nTIMEOUT_MS = 3000\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\nNOTIFICATION_THRESHOLDS = [5, 15]\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n creds = None\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit(\n 'Could not find a credentials.json file. 
Either pass one as argument or make sure credentials.json exists in the current directory or '\n + expanduser('~'))\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES\n )\n creds = flow.run_local_server(port=0)\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n return creds\n\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n now = datetime.utcnow().isoformat() + 'Z'\n tomorrow = datetime.combine(date.today() + timedelta(days=2), datetime.\n min.time()).isoformat() + 'Z'\n events_result = service.events().list(calendarId='primary', timeMin=now,\n timeMax=tomorrow, maxResults=num_events, singleEvents=True, orderBy\n ='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n buffer = weechat.buffer_search('python', SCRIPT_NAME)\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n return buffer\n\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. 
This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n if datestr not in dates:\n dates[datestr] = []\n dates[datestr].append({'time': timestr, 'summary': event['summary']})\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event[\n 'summary']))\n\n\ndef get_calendar(*args):\n result = []\n try:\n events = gc_get_events()\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({'date': start, 'summary': event['summary']})\n except Exception as err:\n result = err\n return json.dumps(result)\n\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n if data == CALLED_FROM_TIMER:\n for event in result:\n dt = datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n return weechat.WEECHAT_RC_OK\n\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n if args == 'init':\n pass\n else:\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_CMD)\n return weechat.WEECHAT_RC_OK\n\n\ndef script_main(data, remaining_calls):\n weechat.hook_process('func:get_calendar', TIMEOUT_MS,\n 'get_calendar_callback', CALLED_FROM_TIMER)\n return 
weechat.WEECHAT_RC_OK\n\n\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,\n SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\nweechat.hook_command('gcal',\n 'Displays events for today and tomorrow in a new buffer.', '[init]',\n ' || init - Initializes the items needed for this plugin to work.', '',\n 'gcal_command', '')\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport weechat\nimport sys\nimport pickle\nimport json\nimport math\nimport os.path\nfrom datetime import datetime\nfrom datetime import date\nfrom datetime import timedelta\nfrom dateutil.parser import parse as datetime_parse\nfrom os.path import expanduser\n\nfrom googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\n# TODO: Add settings\n# minutes_remaining = [5, 10, 15]\n# notify_enabled = yes/no\n# time_format = '%H:%M' ???\n\nSCRIPT_NAME = 'weechat-gcal'\nSCRIPT_AUTHOR = 'Dave Mulford'\nSCRIPT_VERSION = '0.1'\nSCRIPT_LICENSE = 'GPL2'\nSCRIPT_DESC = 'A Google Calendar integration script that provides notifications of upcoming events.'\nSCRIPT_SHUTDOWN_FN = ''\nSCRIPT_CHARSET = ''\n\nTIMEOUT_MS = 3000\n\nCALLED_FROM_CMD = '100'\nCALLED_FROM_TIMER = '200'\n\nNOTIFICATION_THRESHOLDS = [5,15]\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/calendar.readonly']\n\n# Where the weechat-gcal-token.pickle file is located\nCACHE_DIR = os.path.join(expanduser('~'), '.cache', 'weechat-gcal')\n\n# =============================\n# GOOGLE CALENDAR FUNCTIONS\n# =============================\n\ndef _load_credentials(creds_file=None):\n \"\"\"Loads the credentials from a credentials.json file or by prompting for authentication.\n Returns a credentials object to be used by the Google Sheets API.\n \"\"\"\n\n creds = None\n\n # Validate the credentials file\n if not creds_file:\n creds_file = 'credentials.json'\n if not os.path.exists(creds_file):\n creds_file = os.path.join(expanduser('~'), 'credentials.json')\n if not os.path.exists(creds_file):\n raise SystemExit('Could not find a credentials.json file. 
' \\\n 'Either pass one as argument or make sure credentials.json exists in ' \\\n 'the current directory or ' + expanduser('~'))\n\n # Creates CACHE_DIR if it does not exist\n # mode 0x777 (the default) is used because the system's umask value is masked out first\n if not os.path.exists(CACHE_DIR):\n os.mkdir(CACHE_DIR)\n\n pickle_filename = os.path.join(CACHE_DIR, 'weechat-gcal-token.pickle')\n\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if os.path.exists(pickle_filename):\n with open(pickle_filename, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(creds_file, SCOPES)\n creds = flow.run_local_server(port=0)\n\n # Save the credentials for the next run\n with open(pickle_filename, 'wb') as token:\n pickle.dump(creds, token)\n\n return creds\n\ndef gc_get_events(num_events=50):\n creds = _load_credentials()\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n tomorrow = datetime.combine( \\\n date.today() + timedelta(days=2), \\\n datetime.min.time()) \\\n .isoformat() + 'Z'\n\n #print('Getting the upcoming {} events between {} and {}'.format(num_events, now, tomorrow))\n events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=tomorrow,\n maxResults=num_events, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n return events\n\n# =============================\n# WEECHAT HELPER FUNCTIONS\n# =============================\n\ndef buffer_get():\n \"\"\"Finds or creates a buffer to use for script output.\n Returns a buffer pointer.\n \"\"\"\n 
buffer = weechat.buffer_search('python', SCRIPT_NAME)\n\n if not buffer:\n buffer = weechat.buffer_new(SCRIPT_NAME, 'buffer_input', '', '', '')\n weechat.buffer_set(buffer, 'time_for_each_line', '0')\n weechat.buffer_set(buffer, 'nicklist', '0')\n weechat.buffer_set(buffer, 'title', 'Google Calendar')\n weechat.buffer_set(buffer, 'localvar_set_no_log', '1')\n\n return buffer\n\ndef buffer_input(data, buffer, input_data):\n \"\"\"A function called when text, that is not a command, is entered\n in the weechat-gcal buffer. This function exists to prevent\n errors from being shown, there is no functionality.\n \"\"\"\n return weechat.WEECHAT_RC_OK\n\ndef update_gcal_buffer(buffer, events):\n weechat.buffer_clear(buffer)\n\n if events == []:\n weechat.prnt(buffer, 'No events for now. YAY!!!')\n\n dates = {}\n for event in events:\n dt = datetime_parse(event['date'])\n datestr = dt.strftime('%a %Y-%m-%d')\n timestr = dt.strftime('%H:%M')\n\n if datestr not in dates:\n dates[datestr] = []\n\n dates[datestr].append({\n 'time': timestr,\n 'summary': event['summary']\n })\n\n for datestr in dates.keys():\n weechat.prnt(buffer, datestr)\n\n dt_events = dates[datestr]\n for event in dt_events:\n weechat.prnt(buffer, '{} {}'.format(event['time'], event['summary']))\n\n# =============================\n# MAIN SCRIPT FUNCTIONS\n# =============================\n\ndef get_calendar(*args):\n result = []\n\n try:\n events = gc_get_events()\n\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n result.append({\n 'date': start,\n 'summary': event['summary']\n })\n except Exception as err:\n result = err\n\n return json.dumps(result)\n\ndef get_calendar_callback(data, command, return_code, out, err):\n result = json.loads(out)\n\n buffer = buffer_get()\n update_gcal_buffer(buffer, result)\n\n # Notify if any events are happening in 10 minutes!\n if data == CALLED_FROM_TIMER:\n for event in result:\n #weechat.prnt(buffer, 'Handling event!')\n dt = 
datetime_parse(event['date'])\n now = datetime.now(tz=dt.tzinfo)\n timediff = dt - now\n minutes_remaining = math.ceil(timediff.total_seconds() / 60)\n\n #weechat.prnt(buffer, '{} - {} = {} ({} mins)'.format(dt, now, timediff, minutes_remaining))\n\n # TODO Make minutes_remaining threshold configurable\n if minutes_remaining in NOTIFICATION_THRESHOLDS:\n msg = '[{}m] {}'.format(minutes_remaining, event['summary'])\n weechat.prnt_date_tags(buffer, 0, 'notify_highlight', msg)\n\n return weechat.WEECHAT_RC_OK\n\ndef gcal_command(data, buffer, args):\n buffer = buffer_get()\n\n # TODO Implement init\n if args == 'init':\n pass\n else:\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_CMD\n )\n\n return weechat.WEECHAT_RC_OK\n\ndef script_main(data, remaining_calls):\n # Weechat is single-threaded so a new process is created so other things aren't held up\n # if retrieving Google Calendar events doesn't return in a timely manner.\n # https://weechat.org/files/doc/stable/weechat_scripting.en.html#weechat_architecture\n weechat.hook_process(\n 'func:get_calendar',\n TIMEOUT_MS,\n 'get_calendar_callback',\n CALLED_FROM_TIMER\n )\n\n return weechat.WEECHAT_RC_OK\n\n# Register the script on /script load\n# This needs to happen first!\nweechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, \\\n SCRIPT_LICENSE, SCRIPT_DESC, SCRIPT_SHUTDOWN_FN, SCRIPT_CHARSET)\n\n# Setup a command to initialize the Google Calendar authentication and show events in a buffer.\nweechat.hook_command(\n 'gcal',\n 'Displays events for today and tomorrow in a new buffer.',\n '[init]',\n ' || init - Initializes the items needed for this plugin to work.',\n '',\n 'gcal_command',\n ''\n)\n\n# Check once per minute whether we should notify of imminent events\nweechat.hook_timer(60000, 60, 0, 'script_main', '')\n",
"step-ids": [
7,
10,
11,
12,
13
]
}
|
[
7,
10,
11,
12,
13
] |
# Count occurrences of each space-separated token on the first line of file.txt
# and print the resulting {token: count} dict.
with open("file.txt", 'r') as fh:
    data = fh.readline()

# NOTE(review): split(' ') keeps the trailing newline attached to the last
# token and yields empty strings for runs of spaces; use split() instead if
# whitespace-insensitive word counting is intended -- confirm.
lis = data.split(' ')
my_dict = {}

for key in lis:
    # dict.get avoids the double lookup of `key in my_dict.keys()`.
    my_dict[key] = my_dict.get(key, 0) + 1

print(my_dict)
|
normal
|
{
"blob_id": "8cd582915c5abd96a4ef8a3a5309311f2a73a156",
"index": 460,
"step-1": "<mask token>\n",
"step-2": "with open('file.txt', 'r') as fh:\n data = fh.readline()\n<mask token>\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\nprint(my_dict)\n",
"step-3": "with open('file.txt', 'r') as fh:\n data = fh.readline()\nlis = data.split(' ')\nmy_dict = {}\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\nprint(my_dict)\n",
"step-4": "\n\nwith open(\"file.txt\", 'r') as fh:\n data = fh.readline()\n\nlis= data.split(' ')\nmy_dict={}\n\nfor key in lis:\n if key in my_dict.keys():\n my_dict[key] += 1\n else:\n my_dict[key] = 1\n\nprint(my_dict)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.
Z_DEFAULT_COMPRESSION):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PickleHandler(IHandler):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.
Z_DEFAULT_COMPRESSION):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PickleHandler(IHandler):
dumps = staticmethod(pickle.dumps)
loads = staticmethod(pickle.loads)
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.
Z_DEFAULT_COMPRESSION):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
<|reserved_special_token_1|>
import json
import pickle
import zlib
from diskcollections.interfaces import IHandler
class PickleHandler(IHandler):
dumps = staticmethod(pickle.dumps)
loads = staticmethod(pickle.loads)
class PickleZLibHandler(IHandler):
@staticmethod
def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.
Z_DEFAULT_COMPRESSION):
pickled = pickle.dumps(obj, protocol=protocol)
compressed = zlib.compress(pickled, level)
return compressed
@staticmethod
def loads(compressed):
pickled = zlib.decompress(compressed)
obj = pickle.loads(pickled)
return obj
class JsonHandler(IHandler):
dumps = staticmethod(json.dumps)
loads = staticmethod(json.loads)
class JsonZLibHandler(IHandler):
@staticmethod
def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):
jsoned = json.dumps(obj).encode()
compressed = zlib.compress(jsoned, level)
return compressed
@staticmethod
def loads(compressed):
jsoned = zlib.decompress(compressed).decode()
obj = json.loads(jsoned)
return obj
|
flexible
|
{
"blob_id": "60202758a0a42fc26dc1bca9f134a70f28967093",
"index": 2728,
"step-1": "<mask token>\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-2": "<mask token>\n\n\nclass PickleHandler(IHandler):\n <mask token>\n <mask token>\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-3": "<mask token>\n\n\nclass PickleHandler(IHandler):\n dumps = staticmethod(pickle.dumps)\n loads = staticmethod(pickle.loads)\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-4": "import json\nimport pickle\nimport zlib\nfrom diskcollections.interfaces import IHandler\n\n\nclass PickleHandler(IHandler):\n dumps = staticmethod(pickle.dumps)\n loads = staticmethod(pickle.loads)\n\n\nclass PickleZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL, level=zlib.\n Z_DEFAULT_COMPRESSION):\n pickled = pickle.dumps(obj, protocol=protocol)\n compressed = zlib.compress(pickled, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n pickled = zlib.decompress(compressed)\n obj = pickle.loads(pickled)\n return obj\n\n\nclass JsonHandler(IHandler):\n dumps = staticmethod(json.dumps)\n loads = staticmethod(json.loads)\n\n\nclass JsonZLibHandler(IHandler):\n\n @staticmethod\n def dumps(obj, level=zlib.Z_DEFAULT_COMPRESSION):\n jsoned = json.dumps(obj).encode()\n compressed = zlib.compress(jsoned, level)\n return compressed\n\n @staticmethod\n def loads(compressed):\n jsoned = zlib.decompress(compressed).decode()\n obj = json.loads(jsoned)\n return obj\n",
"step-5": null,
"step-ids": [
8,
9,
10,
11
]
}
|
[
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for d, dirs, files in os.walk(top):
for f in files:
i = f.find('.')
if i == -1:
i = 0
suf = f[i:]
rec = cnts.setdefault(suf, [0, 0])
fn = d + '/' + f
if os.path.islink(fn):
sz = 0
else:
sz = os.path.getsize(d + '/' + f)
rec[0] += 1
rec[1] += float(sz) / 1024 ** 4
<|reserved_special_token_0|>
print('Total %.3f' % total)
for sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],
reverse=True)[:max]:
print('%s\t%d\t%.3f' % (suf, cnt, sz))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
top = sys.argv[1]
max = int(sys.argv[2])
cnts = {}
for d, dirs, files in os.walk(top):
for f in files:
i = f.find('.')
if i == -1:
i = 0
suf = f[i:]
rec = cnts.setdefault(suf, [0, 0])
fn = d + '/' + f
if os.path.islink(fn):
sz = 0
else:
sz = os.path.getsize(d + '/' + f)
rec[0] += 1
rec[1] += float(sz) / 1024 ** 4
recs = sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)
total = sum([rec[0] for rec in recs])
print('Total %.3f' % total)
for sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],
reverse=True)[:max]:
print('%s\t%d\t%.3f' % (suf, cnt, sz))
<|reserved_special_token_1|>
import os, sys
top = sys.argv[1]
max = int(sys.argv[2])
cnts = {}
for d, dirs, files in os.walk(top):
for f in files:
i = f.find('.')
if i == -1:
i = 0
suf = f[i:]
rec = cnts.setdefault(suf, [0, 0])
fn = d + '/' + f
if os.path.islink(fn):
sz = 0
else:
sz = os.path.getsize(d + '/' + f)
rec[0] += 1
rec[1] += float(sz) / 1024 ** 4
recs = sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)
total = sum([rec[0] for rec in recs])
print('Total %.3f' % total)
for sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],
reverse=True)[:max]:
print('%s\t%d\t%.3f' % (suf, cnt, sz))
<|reserved_special_token_1|>
import os, sys
top=sys.argv[1]
max=int(sys.argv[2])
cnts={}
for d, dirs, files in os.walk(top):
for f in files:
i=f.find(".")
if i ==-1: i=0
suf=f[i:]
rec=cnts.setdefault(suf, [0,0])
fn=d+'/'+f
if os.path.islink(fn):
sz=0
else:
sz=os.path.getsize(d+'/'+f)
rec[0]+=1; rec[1]+=float(sz)/(1024**4)
recs=sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)
total=sum([rec[0] for rec in recs])
print ("Total %.3f" % total)
for sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)[:max]:
print ("%s\t%d\t%.3f" % (suf, cnt, sz))
|
flexible
|
{
"blob_id": "06aa2d261e31dfe2f0ef66dca01c1fe3db1ca94e",
"index": 7940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor d, dirs, files in os.walk(top):\n for f in files:\n i = f.find('.')\n if i == -1:\n i = 0\n suf = f[i:]\n rec = cnts.setdefault(suf, [0, 0])\n fn = d + '/' + f\n if os.path.islink(fn):\n sz = 0\n else:\n sz = os.path.getsize(d + '/' + f)\n rec[0] += 1\n rec[1] += float(sz) / 1024 ** 4\n<mask token>\nprint('Total %.3f' % total)\nfor sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],\n reverse=True)[:max]:\n print('%s\\t%d\\t%.3f' % (suf, cnt, sz))\n",
"step-3": "<mask token>\ntop = sys.argv[1]\nmax = int(sys.argv[2])\ncnts = {}\nfor d, dirs, files in os.walk(top):\n for f in files:\n i = f.find('.')\n if i == -1:\n i = 0\n suf = f[i:]\n rec = cnts.setdefault(suf, [0, 0])\n fn = d + '/' + f\n if os.path.islink(fn):\n sz = 0\n else:\n sz = os.path.getsize(d + '/' + f)\n rec[0] += 1\n rec[1] += float(sz) / 1024 ** 4\nrecs = sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)\ntotal = sum([rec[0] for rec in recs])\nprint('Total %.3f' % total)\nfor sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],\n reverse=True)[:max]:\n print('%s\\t%d\\t%.3f' % (suf, cnt, sz))\n",
"step-4": "import os, sys\ntop = sys.argv[1]\nmax = int(sys.argv[2])\ncnts = {}\nfor d, dirs, files in os.walk(top):\n for f in files:\n i = f.find('.')\n if i == -1:\n i = 0\n suf = f[i:]\n rec = cnts.setdefault(suf, [0, 0])\n fn = d + '/' + f\n if os.path.islink(fn):\n sz = 0\n else:\n sz = os.path.getsize(d + '/' + f)\n rec[0] += 1\n rec[1] += float(sz) / 1024 ** 4\nrecs = sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)\ntotal = sum([rec[0] for rec in recs])\nprint('Total %.3f' % total)\nfor sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts],\n reverse=True)[:max]:\n print('%s\\t%d\\t%.3f' % (suf, cnt, sz))\n",
"step-5": "\nimport os, sys\n\ntop=sys.argv[1]\nmax=int(sys.argv[2])\n\ncnts={}\n\nfor d, dirs, files in os.walk(top):\n for f in files:\n i=f.find(\".\")\n if i ==-1: i=0\n suf=f[i:]\n rec=cnts.setdefault(suf, [0,0])\n fn=d+'/'+f\n if os.path.islink(fn):\n sz=0\n else:\n sz=os.path.getsize(d+'/'+f)\n rec[0]+=1; rec[1]+=float(sz)/(1024**4)\n\nrecs=sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)\ntotal=sum([rec[0] for rec in recs])\nprint (\"Total %.3f\" % total)\n\nfor sz, cnt, suf in sorted([(cnts[k][1], cnts[k][0], k) for k in cnts], reverse=True)[:max]:\n print (\"%s\\t%d\\t%.3f\" % (suf, cnt, sz))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pytest
import kdlc
from shutil import rmtree
import os
# from .context import kdlc
test_generated_dir = os.path.dirname(__file__) + "/generated/"
@pytest.fixture(scope="session")
def my_setup(request):
print("\nDoing setup")
def fin():
print("\nDoing teardown")
if os.path.exists(test_generated_dir):
rmtree(test_generated_dir)
kdlc.cleanup()
request.addfinalizer(fin)
|
normal
|
{
"blob_id": "7ff029e2f0054146e438f4e4f13269e83e28c469",
"index": 8727,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-3": "<mask token>\ntest_generated_dir = os.path.dirname(__file__) + '/generated/'\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-4": "import pytest\nimport kdlc\nfrom shutil import rmtree\nimport os\ntest_generated_dir = os.path.dirname(__file__) + '/generated/'\n\n\n@pytest.fixture(scope='session')\ndef my_setup(request):\n print('\\nDoing setup')\n\n def fin():\n print('\\nDoing teardown')\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n kdlc.cleanup()\n request.addfinalizer(fin)\n",
"step-5": "import pytest\nimport kdlc\nfrom shutil import rmtree\nimport os\n\n# from .context import kdlc\n\ntest_generated_dir = os.path.dirname(__file__) + \"/generated/\"\n\n\n@pytest.fixture(scope=\"session\")\ndef my_setup(request):\n print(\"\\nDoing setup\")\n\n def fin():\n print(\"\\nDoing teardown\")\n\n if os.path.exists(test_generated_dir):\n rmtree(test_generated_dir)\n\n kdlc.cleanup()\n\n request.addfinalizer(fin)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Augmentor:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
<|reserved_special_token_0|>
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor) ->BatchEncoding:
raise NotImplementedError(
'Augmentor subclass should implement augment_sample.')
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
<|reserved_special_token_1|>
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from datasets import concatenate_datasets
from datasets.arrow_dataset import Dataset
from transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import ClassificationDatasetPreprocessor
from transformers import PreTrainedModel
from transformers.tokenization_utils import BatchEncoding
class Augmentor:
def __init__(self) ->None:
self.__AUGMENTATION_VALID__ = 'VALID'
def augment(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:
PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:
augmented_samples = None
if discriminator is not None and preprocessor is None:
raise Exception(
'To use discriminator, preprocessor should be required.')
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator,
preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int
) ->Dict[str, Any]:
example[self.__AUGMENTATION_VALID__
] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid,
with_indices=True)
augmented = augmented.filter(lambda e: e[self.
__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples,
augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(
dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.
__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor:
ClassificationDatasetPreprocessor) ->BatchEncoding:
raise NotImplementedError(
'Augmentor subclass should implement augment_sample.')
def discriminate(self, model: PreTrainedModel, preprocessor:
ClassificationDatasetPreprocessor, original: Dataset, augmented:
Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[
str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented,
augmented_scores):
if original_score['label'] == augmented_score['label'
] and augmented_score['score'] >= threshold:
matched.append(i)
logs.append({'original': original[preprocessor.input_column],
'original_label': original_score['label'], 'original_score':
original_score['score'], 'augmented': augmented[
preprocessor.input_column], 'augmented_label':
augmented_score['label'], 'augmented_score':
augmented_score['score']})
return matched, logs
def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict
[str, Union[int, float]]]:
model.eval()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(device)
with torch.no_grad():
input_ids = examples['input_ids'].to(device)
if 'token_type_ids' in examples.column_names:
token_type_ids = examples['token_type_ids'].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims
=True)
return [{'label': model.config.id2label[item.argmax()], 'score':
item.max().item()} for item in scores]
<|reserved_special_token_1|>
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from datasets import concatenate_datasets
from datasets.arrow_dataset import Dataset
from transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import (
ClassificationDatasetPreprocessor,
)
from transformers import PreTrainedModel
from transformers.tokenization_utils import BatchEncoding
class Augmentor:
def __init__(self) -> None:
self.__AUGMENTATION_VALID__ = "VALID"
def augment(
self,
dataset: Dataset,
preprocessor: ClassificationDatasetPreprocessor,
num_trial: int = 2,
discriminator: PreTrainedModel = None,
threshold: float = 0.8,
) -> BatchEncoding:
augmented_samples = None # type: Optional[BatchEncoding]
if discriminator is not None and preprocessor is None:
raise Exception("To use discriminator, preprocessor should be required.")
for _ in range(num_trial):
original = dataset.shuffle()
augmented = self.generate(original, preprocessor)
if discriminator is not None and preprocessor is not None:
matched, log = self.discriminate(discriminator, preprocessor, original, augmented, threshold)
def unmatched_to_invalid(example: Dict[str, Any], index: int) -> Dict[str, Any]:
example[self.__AUGMENTATION_VALID__] = True if index in matched else False
return example
augmented = augmented.map(unmatched_to_invalid, with_indices=True)
augmented = augmented.filter(lambda e: e[self.__AUGMENTATION_VALID__])
if len(augmented) == 0:
continue
if augmented_samples is None:
augmented_samples = augmented
else:
augmented_samples = concatenate_datasets([augmented_samples, augmented])
if len(dataset) <= len(augmented_samples):
augmented_samples = augmented_samples.select(range(len(dataset)))
break
if augmented_samples is not None:
augmented_samples = augmented_samples.remove_columns([self.__AUGMENTATION_VALID__])
augmented_samples = augmented_samples.flatten_indices()
return augmented_samples
def generate(self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor) -> BatchEncoding:
raise NotImplementedError("Augmentor subclass should implement augment_sample.")
def discriminate(
self,
model: PreTrainedModel,
preprocessor: ClassificationDatasetPreprocessor,
original: Dataset,
augmented: Dataset,
threshold: float,
) -> Tuple[List[int], List[Dict[str, Union[str, float]]]]:
formatted_original = preprocessor.format(original)
original_scores = self.predict(model, formatted_original)
formatted_augmented = preprocessor.format(augmented)
augmented_scores = self.predict(model, formatted_augmented)
matched = []
logs = []
for i, original, original_score, augmented, augmented_score in zip(
range(len(original)), original, original_scores, augmented, augmented_scores
):
if original_score["label"] == augmented_score["label"] and augmented_score["score"] >= threshold:
matched.append(i)
logs.append(
{
"original": original[preprocessor.input_column],
"original_label": original_score["label"],
"original_score": original_score["score"],
"augmented": augmented[preprocessor.input_column],
"augmented_label": augmented_score["label"],
"augmented_score": augmented_score["score"],
}
)
return (matched, logs)
def predict(
self,
model: PreTrainedModel,
examples: Dataset,
) -> List[Dict[str, Union[int, float]]]:
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
with torch.no_grad(): # type: ignore
input_ids = examples["input_ids"].to(device)
if "token_type_ids" in examples.column_names:
token_type_ids = examples["token_type_ids"].to(device)
outputs = model(input_ids, token_type_ids=token_type_ids)
else:
outputs = model(input_ids)
predictions = outputs[0].cpu().numpy()
scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims=True)
return [{"label": model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores]
|
flexible
|
{
"blob_id": "4a88ce640b6680df925288b44232cf43d585c11c",
"index": 669,
"step-1": "<mask token>\n\n\nclass Augmentor:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n <mask token>\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n 
augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 'score':\n item.max().item()} for item in scores]\n",
"step-3": "<mask token>\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n\n def generate(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor) ->BatchEncoding:\n raise NotImplementedError(\n 'Augmentor subclass should implement augment_sample.')\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, 
float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 'score':\n item.max().item()} for item in scores]\n",
"step-4": "from typing import Any, Dict, List, Optional, Tuple, Union\nimport numpy as np\nimport torch\nfrom datasets import concatenate_datasets\nfrom datasets.arrow_dataset import Dataset\nfrom transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import ClassificationDatasetPreprocessor\nfrom transformers import PreTrainedModel\nfrom transformers.tokenization_utils import BatchEncoding\n\n\nclass Augmentor:\n\n def __init__(self) ->None:\n self.__AUGMENTATION_VALID__ = 'VALID'\n\n def augment(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor, num_trial: int=2, discriminator:\n PreTrainedModel=None, threshold: float=0.8) ->BatchEncoding:\n augmented_samples = None\n if discriminator is not None and preprocessor is None:\n raise Exception(\n 'To use discriminator, preprocessor should be required.')\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator,\n preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int\n ) ->Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__\n ] = True if index in matched else False\n return example\n augmented = augmented.map(unmatched_to_invalid,\n with_indices=True)\n augmented = augmented.filter(lambda e: e[self.\n __AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples,\n augmented])\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(\n dataset)))\n break\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.\n __AUGMENTATION_VALID__])\n augmented_samples = augmented_samples.flatten_indices()\n return augmented_samples\n\n def 
generate(self, dataset: Dataset, preprocessor:\n ClassificationDatasetPreprocessor) ->BatchEncoding:\n raise NotImplementedError(\n 'Augmentor subclass should implement augment_sample.')\n\n def discriminate(self, model: PreTrainedModel, preprocessor:\n ClassificationDatasetPreprocessor, original: Dataset, augmented:\n Dataset, threshold: float) ->Tuple[List[int], List[Dict[str, Union[\n str, float]]]]:\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented,\n augmented_scores):\n if original_score['label'] == augmented_score['label'\n ] and augmented_score['score'] >= threshold:\n matched.append(i)\n logs.append({'original': original[preprocessor.input_column],\n 'original_label': original_score['label'], 'original_score':\n original_score['score'], 'augmented': augmented[\n preprocessor.input_column], 'augmented_label':\n augmented_score['label'], 'augmented_score':\n augmented_score['score']})\n return matched, logs\n\n def predict(self, model: PreTrainedModel, examples: Dataset) ->List[Dict\n [str, Union[int, float]]]:\n model.eval()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.to(device)\n with torch.no_grad():\n input_ids = examples['input_ids'].to(device)\n if 'token_type_ids' in examples.column_names:\n token_type_ids = examples['token_type_ids'].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n predictions = outputs[0].cpu().numpy()\n scores = np.exp(predictions) / np.exp(predictions).sum(-1, keepdims\n =True)\n return [{'label': model.config.id2label[item.argmax()], 'score':\n item.max().item()} for item in scores]\n",
"step-5": "from typing import Any, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom datasets import concatenate_datasets\nfrom datasets.arrow_dataset import Dataset\nfrom transfer_classifier.dataset_preprocessor.classification_dataset_preprocessor import (\n ClassificationDatasetPreprocessor,\n)\nfrom transformers import PreTrainedModel\nfrom transformers.tokenization_utils import BatchEncoding\n\n\nclass Augmentor:\n def __init__(self) -> None:\n self.__AUGMENTATION_VALID__ = \"VALID\"\n\n def augment(\n self,\n dataset: Dataset,\n preprocessor: ClassificationDatasetPreprocessor,\n num_trial: int = 2,\n discriminator: PreTrainedModel = None,\n threshold: float = 0.8,\n ) -> BatchEncoding:\n augmented_samples = None # type: Optional[BatchEncoding]\n\n if discriminator is not None and preprocessor is None:\n raise Exception(\"To use discriminator, preprocessor should be required.\")\n\n for _ in range(num_trial):\n original = dataset.shuffle()\n augmented = self.generate(original, preprocessor)\n if discriminator is not None and preprocessor is not None:\n matched, log = self.discriminate(discriminator, preprocessor, original, augmented, threshold)\n\n def unmatched_to_invalid(example: Dict[str, Any], index: int) -> Dict[str, Any]:\n example[self.__AUGMENTATION_VALID__] = True if index in matched else False\n return example\n\n augmented = augmented.map(unmatched_to_invalid, with_indices=True)\n\n augmented = augmented.filter(lambda e: e[self.__AUGMENTATION_VALID__])\n if len(augmented) == 0:\n continue\n\n if augmented_samples is None:\n augmented_samples = augmented\n else:\n augmented_samples = concatenate_datasets([augmented_samples, augmented])\n\n if len(dataset) <= len(augmented_samples):\n augmented_samples = augmented_samples.select(range(len(dataset)))\n break\n\n if augmented_samples is not None:\n augmented_samples = augmented_samples.remove_columns([self.__AUGMENTATION_VALID__])\n augmented_samples = 
augmented_samples.flatten_indices()\n\n return augmented_samples\n\n def generate(self, dataset: Dataset, preprocessor: ClassificationDatasetPreprocessor) -> BatchEncoding:\n raise NotImplementedError(\"Augmentor subclass should implement augment_sample.\")\n\n def discriminate(\n self,\n model: PreTrainedModel,\n preprocessor: ClassificationDatasetPreprocessor,\n original: Dataset,\n augmented: Dataset,\n threshold: float,\n ) -> Tuple[List[int], List[Dict[str, Union[str, float]]]]:\n\n formatted_original = preprocessor.format(original)\n original_scores = self.predict(model, formatted_original)\n\n formatted_augmented = preprocessor.format(augmented)\n augmented_scores = self.predict(model, formatted_augmented)\n\n matched = []\n logs = []\n for i, original, original_score, augmented, augmented_score in zip(\n range(len(original)), original, original_scores, augmented, augmented_scores\n ):\n if original_score[\"label\"] == augmented_score[\"label\"] and augmented_score[\"score\"] >= threshold:\n matched.append(i)\n\n logs.append(\n {\n \"original\": original[preprocessor.input_column],\n \"original_label\": original_score[\"label\"],\n \"original_score\": original_score[\"score\"],\n \"augmented\": augmented[preprocessor.input_column],\n \"augmented_label\": augmented_score[\"label\"],\n \"augmented_score\": augmented_score[\"score\"],\n }\n )\n\n return (matched, logs)\n\n def predict(\n self,\n model: PreTrainedModel,\n examples: Dataset,\n ) -> List[Dict[str, Union[int, float]]]:\n model.eval()\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n model.to(device)\n with torch.no_grad(): # type: ignore\n input_ids = examples[\"input_ids\"].to(device)\n if \"token_type_ids\" in examples.column_names:\n token_type_ids = examples[\"token_type_ids\"].to(device)\n outputs = model(input_ids, token_type_ids=token_type_ids)\n else:\n outputs = model(input_ids)\n\n predictions = outputs[0].cpu().numpy()\n\n scores = np.exp(predictions) / 
np.exp(predictions).sum(-1, keepdims=True)\n return [{\"label\": model.config.id2label[item.argmax()], \"score\": item.max().item()} for item in scores]\n",
"step-ids": [
1,
5,
6,
7,
8
]
}
|
[
1,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
views = Blueprint('views', __name__)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Blueprint
views = Blueprint('views', __name__)
from . import routes
|
flexible
|
{
"blob_id": "139ccdaf7acb2a2d74649f0c32217d1fe71a954a",
"index": 4800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nviews = Blueprint('views', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\nviews = Blueprint('views', __name__)\nfrom . import routes\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import pygame
import sys
import time
import random
from snake_gym.envs.modules import *
from pygame.locals import *
import numpy as np
class SnakeGame(object):
def __init__(self):
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),
0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
self.surface.fill((255, 255, 255))
self.clock = pygame.time.Clock()
self.fps = 60
self.done = False
pygame.key.set_repeat(1, 40)
self.screen.blit(self.surface, (0, 0))
pygame.init()
self.fpsClock = pygame.time.Clock()
self.snake = Snake()
self.apple = Apple()
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,
SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
|
normal
|
{
"blob_id": "6d61df9ac072100d01a1ce3cf7b4c056f66a163c",
"index": 502,
"step-1": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SnakeGame(object):\n <mask token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-3": "<mask token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-4": "import pygame\nimport sys\nimport time\nimport random\nfrom snake_gym.envs.modules import *\nfrom pygame.locals import *\nimport numpy as np\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import threading
import time
from datetime import timedelta
from typing import List, Optional, Set
import airflow.utils.dag_processing as dag_processing
from airflow.configuration import conf
from airflow.contrib.jobs.background_service import BackgroundService
from airflow.events.scheduler_events import DagExecutableEvent
from airflow.jobs import scheduler_job
from airflow.models import DagModel
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mailbox import Mailbox
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.session import create_session
class DagTriggerDagFileProcessorAgent(dag_processing.DagFileProcessorAgent):
def wait_on_manager_message(self, timeout=None):
self._parent_signal_conn.poll(timeout)
class StoppableThread(threading.Thread, LoggingMixin):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._ended = threading.Event()
def stop(self):
self.log.debug("stopping thread")
self._ended.set()
def stopped(self):
return self._ended.is_set()
class DagRunnableReportingThread(StoppableThread, LoggingMixin):
def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox: Mailbox, *args, **kwargs):
super(DagRunnableReportingThread, self).__init__(*args, **kwargs)
self.setName("DagTrigger-DagRunnableReporter")
self._async_mode = async_mode
self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = dag_file_processor_agent
self._mailbox = mailbox
def run(self) -> None:
while not self.stopped():
# send AGENT_RUN_ONCE to DagFileProcessorManager to trigger dag parsing if not async mode
if not self._async_mode:
self._dag_file_processor_agent.run_single_parsing_loop()
with create_session() as session:
dag_models = DagModel.dags_needing_dagruns(session).all()
self.log.debug("dags needs dagruns: {}".format(dag_models))
self._send_dag_executable(dag_models)
time.sleep(5)
self.log.info("DagRunnableReporter exiting")
def _send_dag_executable(self, dag_models: Set[DagModel]):
for dag_model in dag_models:
self._mailbox.send_message(DagExecutableEvent(dag_model.dag_id).to_event())
class ParsingStatRetrieveThread(StoppableThread):
def __init__(self, dag_file_processor_agent, *args, **kwargs):
super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)
self.setName("DagTrigger-ParsingStatRetriever")
self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = dag_file_processor_agent
def run(self) -> None:
while not self.stopped():
self._dag_file_processor_agent.wait_on_manager_message()
self._dag_file_processor_agent.heartbeat()
time.sleep(10)
self.log.info("ParsingStatRetriever exiting")
class DagTrigger(BackgroundService, MultiprocessingStartMethodMixin):
def __init__(self,
dag_directory: str,
max_runs: int,
dag_ids: Optional[List[str]],
pickle_dags: bool,
mailbox: Mailbox,
refresh_dag_dir_interval=1,
notification_service_uri=None):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param mailbox: the mailbox to send the DagExecutableEvent
:type mailbox: Mailbox
"""
super().__init__()
self._dag_directory = dag_directory
self._max_runs = max_runs
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._mailbox = mailbox
# use synchronize mode when using sqlite
self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite')
self._dag_runnable_reporting_thread: Optional[StoppableThread] = None
self._parsing_stat_process_thread: Optional[StoppableThread] = None
self._dag_file_processor_agent: Optional[DagTriggerDagFileProcessorAgent] = None
self._refresh_dag_dir_interval = refresh_dag_dir_interval
self._notification_service_uri = notification_service_uri
def start(self):
self._start_dag_file_processor_manager()
self._dag_runnable_reporting_thread = DagRunnableReportingThread(self._async_mode,
self._dag_file_processor_agent,
self._mailbox)
self._dag_runnable_reporting_thread.start()
self._parsing_stat_process_thread = ParsingStatRetrieveThread(self._dag_file_processor_agent)
self._parsing_stat_process_thread.start()
def end(self) -> None:
if self._dag_file_processor_agent is not None:
self._dag_file_processor_agent.terminate()
if self._dag_runnable_reporting_thread is not None:
self._dag_runnable_reporting_thread.stop()
if self._parsing_stat_process_thread is not None:
self._parsing_stat_process_thread.stop()
self._dag_runnable_reporting_thread.join()
self._parsing_stat_process_thread.join()
self._dag_file_processor_agent.end()
def terminate(self):
if self._dag_file_processor_agent is not None:
self._dag_file_processor_agent.end()
if self._dag_runnable_reporting_thread is not None:
self._dag_runnable_reporting_thread.stop()
if self._parsing_stat_process_thread is not None:
self._parsing_stat_process_thread.stop()
def _start_dag_file_processor_manager(self):
processor_factory = scheduler_job.SchedulerJob._create_dag_file_processor
self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self._dag_directory,
self._max_runs,
processor_factory,
self._get_processor_timeout(),
[],
self._pickle_dags,
self._async_mode,
self._refresh_dag_dir_interval,
self._notification_service_uri)
self._dag_file_processor_agent.start()
@staticmethod
def _get_processor_timeout():
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
return timedelta(seconds=processor_timeout_seconds)
def is_alive(self) -> bool:
return self._dag_file_processor_agent is not None \
and self._dag_runnable_reporting_thread is not None and self._dag_runnable_reporting_thread.is_alive() \
and self._parsing_stat_process_thread is not None and self._parsing_stat_process_thread.is_alive()
|
normal
|
{
"blob_id": "8e26a6b50539fa5f498aa2079a2625214e5b4d03",
"index": 5919,
"step-1": "<mask token>\n\n\nclass DagRunnableReportingThread(StoppableThread, LoggingMixin):\n\n def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox:\n Mailbox, *args, **kwargs):\n super(DagRunnableReportingThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-DagRunnableReporter')\n self._async_mode = async_mode\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n self._mailbox = mailbox\n <mask token>\n <mask token>\n\n\nclass ParsingStatRetrieveThread(StoppableThread):\n\n def __init__(self, dag_file_processor_agent, *args, **kwargs):\n super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-ParsingStatRetriever')\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n\n def run(self) ->None:\n while not self.stopped():\n self._dag_file_processor_agent.wait_on_manager_message()\n self._dag_file_processor_agent.heartbeat()\n time.sleep(10)\n self.log.info('ParsingStatRetriever exiting')\n\n\nclass DagTrigger(BackgroundService, MultiprocessingStartMethodMixin):\n\n def __init__(self, dag_directory: str, max_runs: int, dag_ids: Optional\n [List[str]], pickle_dags: bool, mailbox: Mailbox,\n refresh_dag_dir_interval=1, notification_service_uri=None):\n \"\"\"\n :param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n :type dag_directory: unicode\n :param max_runs: The number of times to parse and schedule each file. 
-1\n for unlimited.\n :type max_runs: int\n :param dag_ids: if specified, only schedule tasks with these DAG IDs\n :type dag_ids: list[str]\n :param pickle_dags: whether to pickle DAGs.\n :type pickle_dags: bool\n :param mailbox: the mailbox to send the DagExecutableEvent\n :type mailbox: Mailbox\n \"\"\"\n super().__init__()\n self._dag_directory = dag_directory\n self._max_runs = max_runs\n self._dag_ids = dag_ids\n self._pickle_dags = pickle_dags\n self._mailbox = mailbox\n self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower(\n ).startswith('sqlite')\n self._dag_runnable_reporting_thread: Optional[StoppableThread] = None\n self._parsing_stat_process_thread: Optional[StoppableThread] = None\n self._dag_file_processor_agent: Optional[\n DagTriggerDagFileProcessorAgent] = None\n self._refresh_dag_dir_interval = refresh_dag_dir_interval\n self._notification_service_uri = notification_service_uri\n\n def start(self):\n self._start_dag_file_processor_manager()\n self._dag_runnable_reporting_thread = DagRunnableReportingThread(self\n ._async_mode, self._dag_file_processor_agent, self._mailbox)\n self._dag_runnable_reporting_thread.start()\n self._parsing_stat_process_thread = ParsingStatRetrieveThread(self.\n _dag_file_processor_agent)\n self._parsing_stat_process_thread.start()\n\n def end(self) ->None:\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.terminate()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n self._dag_runnable_reporting_thread.join()\n self._parsing_stat_process_thread.join()\n self._dag_file_processor_agent.end()\n\n def terminate(self):\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.end()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if 
self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n\n def _start_dag_file_processor_manager(self):\n processor_factory = (scheduler_job.SchedulerJob.\n _create_dag_file_processor)\n self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self\n ._dag_directory, self._max_runs, processor_factory, self.\n _get_processor_timeout(), [], self._pickle_dags, self.\n _async_mode, self._refresh_dag_dir_interval, self.\n _notification_service_uri)\n self._dag_file_processor_agent.start()\n\n @staticmethod\n def _get_processor_timeout():\n processor_timeout_seconds: int = conf.getint('core',\n 'dag_file_processor_timeout')\n return timedelta(seconds=processor_timeout_seconds)\n\n def is_alive(self) ->bool:\n return (self._dag_file_processor_agent is not None and self.\n _dag_runnable_reporting_thread is not None and self.\n _dag_runnable_reporting_thread.is_alive() and self.\n _parsing_stat_process_thread is not None and self.\n _parsing_stat_process_thread.is_alive())\n",
"step-2": "<mask token>\n\n\nclass StoppableThread(threading.Thread, LoggingMixin):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._ended = threading.Event()\n\n def stop(self):\n self.log.debug('stopping thread')\n self._ended.set()\n\n def stopped(self):\n return self._ended.is_set()\n\n\nclass DagRunnableReportingThread(StoppableThread, LoggingMixin):\n\n def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox:\n Mailbox, *args, **kwargs):\n super(DagRunnableReportingThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-DagRunnableReporter')\n self._async_mode = async_mode\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n self._mailbox = mailbox\n\n def run(self) ->None:\n while not self.stopped():\n if not self._async_mode:\n self._dag_file_processor_agent.run_single_parsing_loop()\n with create_session() as session:\n dag_models = DagModel.dags_needing_dagruns(session).all()\n self.log.debug('dags needs dagruns: {}'.format(dag_models))\n self._send_dag_executable(dag_models)\n time.sleep(5)\n self.log.info('DagRunnableReporter exiting')\n\n def _send_dag_executable(self, dag_models: Set[DagModel]):\n for dag_model in dag_models:\n self._mailbox.send_message(DagExecutableEvent(dag_model.dag_id)\n .to_event())\n\n\nclass ParsingStatRetrieveThread(StoppableThread):\n\n def __init__(self, dag_file_processor_agent, *args, **kwargs):\n super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-ParsingStatRetriever')\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n\n def run(self) ->None:\n while not self.stopped():\n self._dag_file_processor_agent.wait_on_manager_message()\n self._dag_file_processor_agent.heartbeat()\n time.sleep(10)\n self.log.info('ParsingStatRetriever exiting')\n\n\nclass DagTrigger(BackgroundService, 
MultiprocessingStartMethodMixin):\n\n def __init__(self, dag_directory: str, max_runs: int, dag_ids: Optional\n [List[str]], pickle_dags: bool, mailbox: Mailbox,\n refresh_dag_dir_interval=1, notification_service_uri=None):\n \"\"\"\n :param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n :type dag_directory: unicode\n :param max_runs: The number of times to parse and schedule each file. -1\n for unlimited.\n :type max_runs: int\n :param dag_ids: if specified, only schedule tasks with these DAG IDs\n :type dag_ids: list[str]\n :param pickle_dags: whether to pickle DAGs.\n :type pickle_dags: bool\n :param mailbox: the mailbox to send the DagExecutableEvent\n :type mailbox: Mailbox\n \"\"\"\n super().__init__()\n self._dag_directory = dag_directory\n self._max_runs = max_runs\n self._dag_ids = dag_ids\n self._pickle_dags = pickle_dags\n self._mailbox = mailbox\n self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower(\n ).startswith('sqlite')\n self._dag_runnable_reporting_thread: Optional[StoppableThread] = None\n self._parsing_stat_process_thread: Optional[StoppableThread] = None\n self._dag_file_processor_agent: Optional[\n DagTriggerDagFileProcessorAgent] = None\n self._refresh_dag_dir_interval = refresh_dag_dir_interval\n self._notification_service_uri = notification_service_uri\n\n def start(self):\n self._start_dag_file_processor_manager()\n self._dag_runnable_reporting_thread = DagRunnableReportingThread(self\n ._async_mode, self._dag_file_processor_agent, self._mailbox)\n self._dag_runnable_reporting_thread.start()\n self._parsing_stat_process_thread = ParsingStatRetrieveThread(self.\n _dag_file_processor_agent)\n self._parsing_stat_process_thread.start()\n\n def end(self) ->None:\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.terminate()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if 
self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n self._dag_runnable_reporting_thread.join()\n self._parsing_stat_process_thread.join()\n self._dag_file_processor_agent.end()\n\n def terminate(self):\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.end()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n\n def _start_dag_file_processor_manager(self):\n processor_factory = (scheduler_job.SchedulerJob.\n _create_dag_file_processor)\n self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self\n ._dag_directory, self._max_runs, processor_factory, self.\n _get_processor_timeout(), [], self._pickle_dags, self.\n _async_mode, self._refresh_dag_dir_interval, self.\n _notification_service_uri)\n self._dag_file_processor_agent.start()\n\n @staticmethod\n def _get_processor_timeout():\n processor_timeout_seconds: int = conf.getint('core',\n 'dag_file_processor_timeout')\n return timedelta(seconds=processor_timeout_seconds)\n\n def is_alive(self) ->bool:\n return (self._dag_file_processor_agent is not None and self.\n _dag_runnable_reporting_thread is not None and self.\n _dag_runnable_reporting_thread.is_alive() and self.\n _parsing_stat_process_thread is not None and self.\n _parsing_stat_process_thread.is_alive())\n",
"step-3": "<mask token>\n\n\nclass StoppableThread(threading.Thread, LoggingMixin):\n \"\"\"Thread class with a stop() method. The thread itself has to check\n regularly for the stopped() condition.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._ended = threading.Event()\n\n def stop(self):\n self.log.debug('stopping thread')\n self._ended.set()\n\n def stopped(self):\n return self._ended.is_set()\n\n\nclass DagRunnableReportingThread(StoppableThread, LoggingMixin):\n\n def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox:\n Mailbox, *args, **kwargs):\n super(DagRunnableReportingThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-DagRunnableReporter')\n self._async_mode = async_mode\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n self._mailbox = mailbox\n\n def run(self) ->None:\n while not self.stopped():\n if not self._async_mode:\n self._dag_file_processor_agent.run_single_parsing_loop()\n with create_session() as session:\n dag_models = DagModel.dags_needing_dagruns(session).all()\n self.log.debug('dags needs dagruns: {}'.format(dag_models))\n self._send_dag_executable(dag_models)\n time.sleep(5)\n self.log.info('DagRunnableReporter exiting')\n\n def _send_dag_executable(self, dag_models: Set[DagModel]):\n for dag_model in dag_models:\n self._mailbox.send_message(DagExecutableEvent(dag_model.dag_id)\n .to_event())\n\n\nclass ParsingStatRetrieveThread(StoppableThread):\n\n def __init__(self, dag_file_processor_agent, *args, **kwargs):\n super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-ParsingStatRetriever')\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n\n def run(self) ->None:\n while not self.stopped():\n self._dag_file_processor_agent.wait_on_manager_message()\n self._dag_file_processor_agent.heartbeat()\n 
time.sleep(10)\n self.log.info('ParsingStatRetriever exiting')\n\n\nclass DagTrigger(BackgroundService, MultiprocessingStartMethodMixin):\n\n def __init__(self, dag_directory: str, max_runs: int, dag_ids: Optional\n [List[str]], pickle_dags: bool, mailbox: Mailbox,\n refresh_dag_dir_interval=1, notification_service_uri=None):\n \"\"\"\n :param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n :type dag_directory: unicode\n :param max_runs: The number of times to parse and schedule each file. -1\n for unlimited.\n :type max_runs: int\n :param dag_ids: if specified, only schedule tasks with these DAG IDs\n :type dag_ids: list[str]\n :param pickle_dags: whether to pickle DAGs.\n :type pickle_dags: bool\n :param mailbox: the mailbox to send the DagExecutableEvent\n :type mailbox: Mailbox\n \"\"\"\n super().__init__()\n self._dag_directory = dag_directory\n self._max_runs = max_runs\n self._dag_ids = dag_ids\n self._pickle_dags = pickle_dags\n self._mailbox = mailbox\n self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower(\n ).startswith('sqlite')\n self._dag_runnable_reporting_thread: Optional[StoppableThread] = None\n self._parsing_stat_process_thread: Optional[StoppableThread] = None\n self._dag_file_processor_agent: Optional[\n DagTriggerDagFileProcessorAgent] = None\n self._refresh_dag_dir_interval = refresh_dag_dir_interval\n self._notification_service_uri = notification_service_uri\n\n def start(self):\n self._start_dag_file_processor_manager()\n self._dag_runnable_reporting_thread = DagRunnableReportingThread(self\n ._async_mode, self._dag_file_processor_agent, self._mailbox)\n self._dag_runnable_reporting_thread.start()\n self._parsing_stat_process_thread = ParsingStatRetrieveThread(self.\n _dag_file_processor_agent)\n self._parsing_stat_process_thread.start()\n\n def end(self) ->None:\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.terminate()\n if 
self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n self._dag_runnable_reporting_thread.join()\n self._parsing_stat_process_thread.join()\n self._dag_file_processor_agent.end()\n\n def terminate(self):\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.end()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n\n def _start_dag_file_processor_manager(self):\n processor_factory = (scheduler_job.SchedulerJob.\n _create_dag_file_processor)\n self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self\n ._dag_directory, self._max_runs, processor_factory, self.\n _get_processor_timeout(), [], self._pickle_dags, self.\n _async_mode, self._refresh_dag_dir_interval, self.\n _notification_service_uri)\n self._dag_file_processor_agent.start()\n\n @staticmethod\n def _get_processor_timeout():\n processor_timeout_seconds: int = conf.getint('core',\n 'dag_file_processor_timeout')\n return timedelta(seconds=processor_timeout_seconds)\n\n def is_alive(self) ->bool:\n return (self._dag_file_processor_agent is not None and self.\n _dag_runnable_reporting_thread is not None and self.\n _dag_runnable_reporting_thread.is_alive() and self.\n _parsing_stat_process_thread is not None and self.\n _parsing_stat_process_thread.is_alive())\n",
"step-4": "<mask token>\n\n\nclass DagTriggerDagFileProcessorAgent(dag_processing.DagFileProcessorAgent):\n\n def wait_on_manager_message(self, timeout=None):\n self._parent_signal_conn.poll(timeout)\n\n\nclass StoppableThread(threading.Thread, LoggingMixin):\n \"\"\"Thread class with a stop() method. The thread itself has to check\n regularly for the stopped() condition.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._ended = threading.Event()\n\n def stop(self):\n self.log.debug('stopping thread')\n self._ended.set()\n\n def stopped(self):\n return self._ended.is_set()\n\n\nclass DagRunnableReportingThread(StoppableThread, LoggingMixin):\n\n def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox:\n Mailbox, *args, **kwargs):\n super(DagRunnableReportingThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-DagRunnableReporter')\n self._async_mode = async_mode\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n dag_file_processor_agent)\n self._mailbox = mailbox\n\n def run(self) ->None:\n while not self.stopped():\n if not self._async_mode:\n self._dag_file_processor_agent.run_single_parsing_loop()\n with create_session() as session:\n dag_models = DagModel.dags_needing_dagruns(session).all()\n self.log.debug('dags needs dagruns: {}'.format(dag_models))\n self._send_dag_executable(dag_models)\n time.sleep(5)\n self.log.info('DagRunnableReporter exiting')\n\n def _send_dag_executable(self, dag_models: Set[DagModel]):\n for dag_model in dag_models:\n self._mailbox.send_message(DagExecutableEvent(dag_model.dag_id)\n .to_event())\n\n\nclass ParsingStatRetrieveThread(StoppableThread):\n\n def __init__(self, dag_file_processor_agent, *args, **kwargs):\n super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)\n self.setName('DagTrigger-ParsingStatRetriever')\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = (\n 
dag_file_processor_agent)\n\n def run(self) ->None:\n while not self.stopped():\n self._dag_file_processor_agent.wait_on_manager_message()\n self._dag_file_processor_agent.heartbeat()\n time.sleep(10)\n self.log.info('ParsingStatRetriever exiting')\n\n\nclass DagTrigger(BackgroundService, MultiprocessingStartMethodMixin):\n\n def __init__(self, dag_directory: str, max_runs: int, dag_ids: Optional\n [List[str]], pickle_dags: bool, mailbox: Mailbox,\n refresh_dag_dir_interval=1, notification_service_uri=None):\n \"\"\"\n :param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n :type dag_directory: unicode\n :param max_runs: The number of times to parse and schedule each file. -1\n for unlimited.\n :type max_runs: int\n :param dag_ids: if specified, only schedule tasks with these DAG IDs\n :type dag_ids: list[str]\n :param pickle_dags: whether to pickle DAGs.\n :type pickle_dags: bool\n :param mailbox: the mailbox to send the DagExecutableEvent\n :type mailbox: Mailbox\n \"\"\"\n super().__init__()\n self._dag_directory = dag_directory\n self._max_runs = max_runs\n self._dag_ids = dag_ids\n self._pickle_dags = pickle_dags\n self._mailbox = mailbox\n self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower(\n ).startswith('sqlite')\n self._dag_runnable_reporting_thread: Optional[StoppableThread] = None\n self._parsing_stat_process_thread: Optional[StoppableThread] = None\n self._dag_file_processor_agent: Optional[\n DagTriggerDagFileProcessorAgent] = None\n self._refresh_dag_dir_interval = refresh_dag_dir_interval\n self._notification_service_uri = notification_service_uri\n\n def start(self):\n self._start_dag_file_processor_manager()\n self._dag_runnable_reporting_thread = DagRunnableReportingThread(self\n ._async_mode, self._dag_file_processor_agent, self._mailbox)\n self._dag_runnable_reporting_thread.start()\n self._parsing_stat_process_thread = ParsingStatRetrieveThread(self.\n 
_dag_file_processor_agent)\n self._parsing_stat_process_thread.start()\n\n def end(self) ->None:\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.terminate()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n self._dag_runnable_reporting_thread.join()\n self._parsing_stat_process_thread.join()\n self._dag_file_processor_agent.end()\n\n def terminate(self):\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.end()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n\n def _start_dag_file_processor_manager(self):\n processor_factory = (scheduler_job.SchedulerJob.\n _create_dag_file_processor)\n self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self\n ._dag_directory, self._max_runs, processor_factory, self.\n _get_processor_timeout(), [], self._pickle_dags, self.\n _async_mode, self._refresh_dag_dir_interval, self.\n _notification_service_uri)\n self._dag_file_processor_agent.start()\n\n @staticmethod\n def _get_processor_timeout():\n processor_timeout_seconds: int = conf.getint('core',\n 'dag_file_processor_timeout')\n return timedelta(seconds=processor_timeout_seconds)\n\n def is_alive(self) ->bool:\n return (self._dag_file_processor_agent is not None and self.\n _dag_runnable_reporting_thread is not None and self.\n _dag_runnable_reporting_thread.is_alive() and self.\n _parsing_stat_process_thread is not None and self.\n _parsing_stat_process_thread.is_alive())\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport threading\nimport time\nfrom datetime import timedelta\nfrom typing import List, Optional, Set\n\nimport airflow.utils.dag_processing as dag_processing\nfrom airflow.configuration import conf\nfrom airflow.contrib.jobs.background_service import BackgroundService\nfrom airflow.events.scheduler_events import DagExecutableEvent\nfrom airflow.jobs import scheduler_job\nfrom airflow.models import DagModel\nfrom airflow.utils.log.logging_mixin import LoggingMixin\nfrom airflow.utils.mailbox import Mailbox\nfrom airflow.utils.mixins import MultiprocessingStartMethodMixin\nfrom airflow.utils.session import create_session\n\n\nclass DagTriggerDagFileProcessorAgent(dag_processing.DagFileProcessorAgent):\n def wait_on_manager_message(self, timeout=None):\n self._parent_signal_conn.poll(timeout)\n\n\nclass StoppableThread(threading.Thread, LoggingMixin):\n \"\"\"Thread class with a stop() method. 
The thread itself has to check\n regularly for the stopped() condition.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(StoppableThread, self).__init__(*args, **kwargs)\n self._ended = threading.Event()\n\n def stop(self):\n self.log.debug(\"stopping thread\")\n self._ended.set()\n\n def stopped(self):\n return self._ended.is_set()\n\n\nclass DagRunnableReportingThread(StoppableThread, LoggingMixin):\n\n def __init__(self, async_mode: bool, dag_file_processor_agent, mailbox: Mailbox, *args, **kwargs):\n super(DagRunnableReportingThread, self).__init__(*args, **kwargs)\n self.setName(\"DagTrigger-DagRunnableReporter\")\n self._async_mode = async_mode\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = dag_file_processor_agent\n self._mailbox = mailbox\n\n def run(self) -> None:\n while not self.stopped():\n # send AGENT_RUN_ONCE to DagFileProcessorManager to trigger dag parsing if not async mode\n if not self._async_mode:\n self._dag_file_processor_agent.run_single_parsing_loop()\n with create_session() as session:\n dag_models = DagModel.dags_needing_dagruns(session).all()\n self.log.debug(\"dags needs dagruns: {}\".format(dag_models))\n self._send_dag_executable(dag_models)\n time.sleep(5)\n self.log.info(\"DagRunnableReporter exiting\")\n\n def _send_dag_executable(self, dag_models: Set[DagModel]):\n for dag_model in dag_models:\n self._mailbox.send_message(DagExecutableEvent(dag_model.dag_id).to_event())\n\n\nclass ParsingStatRetrieveThread(StoppableThread):\n def __init__(self, dag_file_processor_agent, *args, **kwargs):\n super(ParsingStatRetrieveThread, self).__init__(*args, **kwargs)\n self.setName(\"DagTrigger-ParsingStatRetriever\")\n self._dag_file_processor_agent: DagTriggerDagFileProcessorAgent = dag_file_processor_agent\n\n def run(self) -> None:\n while not self.stopped():\n self._dag_file_processor_agent.wait_on_manager_message()\n self._dag_file_processor_agent.heartbeat()\n time.sleep(10)\n 
self.log.info(\"ParsingStatRetriever exiting\")\n\n\nclass DagTrigger(BackgroundService, MultiprocessingStartMethodMixin):\n\n def __init__(self,\n dag_directory: str,\n max_runs: int,\n dag_ids: Optional[List[str]],\n pickle_dags: bool,\n mailbox: Mailbox,\n refresh_dag_dir_interval=1,\n notification_service_uri=None):\n \"\"\"\n :param dag_directory: Directory where DAG definitions are kept. All\n files in file_paths should be under this directory\n :type dag_directory: unicode\n :param max_runs: The number of times to parse and schedule each file. -1\n for unlimited.\n :type max_runs: int\n :param dag_ids: if specified, only schedule tasks with these DAG IDs\n :type dag_ids: list[str]\n :param pickle_dags: whether to pickle DAGs.\n :type pickle_dags: bool\n :param mailbox: the mailbox to send the DagExecutableEvent\n :type mailbox: Mailbox\n \"\"\"\n\n super().__init__()\n\n self._dag_directory = dag_directory\n self._max_runs = max_runs\n self._dag_ids = dag_ids\n self._pickle_dags = pickle_dags\n self._mailbox = mailbox\n # use synchronize mode when using sqlite\n self._async_mode = not conf.get('core', 'sql_alchemy_conn').lower().startswith('sqlite')\n\n self._dag_runnable_reporting_thread: Optional[StoppableThread] = None\n self._parsing_stat_process_thread: Optional[StoppableThread] = None\n self._dag_file_processor_agent: Optional[DagTriggerDagFileProcessorAgent] = None\n self._refresh_dag_dir_interval = refresh_dag_dir_interval\n self._notification_service_uri = notification_service_uri\n\n def start(self):\n self._start_dag_file_processor_manager()\n self._dag_runnable_reporting_thread = DagRunnableReportingThread(self._async_mode,\n self._dag_file_processor_agent,\n self._mailbox)\n self._dag_runnable_reporting_thread.start()\n\n self._parsing_stat_process_thread = ParsingStatRetrieveThread(self._dag_file_processor_agent)\n self._parsing_stat_process_thread.start()\n\n def end(self) -> None:\n if self._dag_file_processor_agent is not None:\n 
self._dag_file_processor_agent.terminate()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n self._dag_runnable_reporting_thread.join()\n self._parsing_stat_process_thread.join()\n self._dag_file_processor_agent.end()\n\n def terminate(self):\n if self._dag_file_processor_agent is not None:\n self._dag_file_processor_agent.end()\n if self._dag_runnable_reporting_thread is not None:\n self._dag_runnable_reporting_thread.stop()\n if self._parsing_stat_process_thread is not None:\n self._parsing_stat_process_thread.stop()\n\n def _start_dag_file_processor_manager(self):\n processor_factory = scheduler_job.SchedulerJob._create_dag_file_processor\n\n self._dag_file_processor_agent = DagTriggerDagFileProcessorAgent(self._dag_directory,\n self._max_runs,\n processor_factory,\n self._get_processor_timeout(),\n [],\n self._pickle_dags,\n self._async_mode,\n self._refresh_dag_dir_interval,\n self._notification_service_uri)\n self._dag_file_processor_agent.start()\n\n @staticmethod\n def _get_processor_timeout():\n processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')\n return timedelta(seconds=processor_timeout_seconds)\n\n def is_alive(self) -> bool:\n return self._dag_file_processor_agent is not None \\\n and self._dag_runnable_reporting_thread is not None and self._dag_runnable_reporting_thread.is_alive() \\\n and self._parsing_stat_process_thread is not None and self._parsing_stat_process_thread.is_alive()\n",
"step-ids": [
13,
19,
20,
22,
24
]
}
|
[
13,
19,
20,
22,
24
] |
"""Add uri on identity provider
Revision ID: 52561c782d96
Revises: cdf9f34b764c
Create Date: 2022-03-11 10:16:39.583434
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52561c782d96'        # unique id of this migration
down_revision = 'cdf9f34b764c'   # migration this one applies on top of
branch_labels = None             # no named branch for this revision
depends_on = None                # no cross-branch dependency
def upgrade():
    """Add a NOT NULL, unique ``uri`` column to ``oauth2_identity_provider``.

    Backfills existing rows with the URI of the provider's linked API
    resource, then patches GitHub (whose site URI differs from its API
    resource URI) before tightening the column constraints.
    """
    bind = op.get_bind()
    # Snapshot (provider id, resource id, resource uri) before the ALTER.
    urls = bind.execute("SELECT p.id as pid, r.id as rid, r.uri as uri "
                        "FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id")
    # Add the column as nullable first so existing rows do not violate NOT NULL.
    op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(), nullable=True))
    # Backfill: default each provider's URI to its API resource URI.
    # Bound parameters replace the original f-string interpolation, which
    # produced broken SQL (and an injection vector) whenever a stored URI
    # contained a single quote.
    for url in urls:
        bind.execute(
            sa.text("UPDATE oauth2_identity_provider SET uri = :uri WHERE id = :pid"),
            {"uri": url[2], "pid": url[0]},
        )
    # patch Github URI
    bind.execute("UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'")
    # Every row now has a value: enforce NOT NULL and uniqueness.
    op.alter_column('oauth2_identity_provider', 'uri', nullable=False)
    op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])
def downgrade():
# remove URI
op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')
op.drop_column('oauth2_identity_provider', 'uri')
|
normal
|
{
"blob_id": "c185a88332e39c561649f087f01fd3b704e7010b",
"index": 1959,
"step-1": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-3": "<mask token>\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n urls = bind.execute(\n 'SELECT p.id as pid, r.id as rid, r.uri as uri FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id'\n )\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(),\n nullable=True))\n for url in urls:\n bind.execute(\n f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\"\n )\n bind.execute(\n \"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\"\n )\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-5": "\"\"\"Add uri on identity provider\n\nRevision ID: 52561c782d96\nRevises: cdf9f34b764c\nCreate Date: 2022-03-11 10:16:39.583434\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '52561c782d96'\ndown_revision = 'cdf9f34b764c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n bind = op.get_bind()\n # get api urls\n urls = bind.execute(\"SELECT p.id as pid, r.id as rid, r.uri as uri \"\n \"FROM oauth2_identity_provider p JOIN resource r ON p.api_resource_id = r.id\")\n # add URI\n op.add_column('oauth2_identity_provider', sa.Column('uri', sa.String(), nullable=True))\n # set api_url as default URI\n for url in urls:\n bind.execute(f\"UPDATE oauth2_identity_provider SET uri = '{url[2]}' WHERE id = {url[0]}\")\n # patch Github URI\n bind.execute(\"UPDATE oauth2_identity_provider SET uri = 'https://github.com' WHERE name = 'github'\")\n # add constraints\n op.alter_column('oauth2_identity_provider', 'uri', nullable=False)\n op.create_unique_constraint(None, 'oauth2_identity_provider', ['uri'])\n\n\ndef downgrade():\n # remove URI\n op.drop_constraint(None, 'oauth2_identity_provider', type_='unique')\n op.drop_column('oauth2_identity_provider', 'uri')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class JointModel(nn.Module):
def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,
encoder_shortcut, generator_shortcut, generator_transform, num_word,
emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,
word_rnn_bidirectional, word_attention_size, context_rnn_size,
context_rnn_num_layer, context_rnn_dropout,
context_rnn_bidirectional, context_attention_size, mlp_size,
num_label, pretrained_embedding):
super(JointModel, self).__init__()
self.d_v = d_v
self.d_e = d_e
self.d_t = d_t
self.encoder_layers = encoder_layers
self.generator_layers = generator_layers
self.generator_transform = generator_transform
self.encoder_shortcut = encoder_shortcut
self.generator_shortcut = generator_shortcut
self.en1_fc = nn.Linear(self.d_v, self.d_e)
self.en2_fc = nn.Linear(self.d_e, self.d_e)
self.en_drop = nn.Dropout(0.2)
self.mean_fc = nn.Linear(self.d_e, self.d_t)
self.logvar_fc = nn.Linear(self.d_e, self.d_t)
self.generator1 = nn.Linear(self.d_t, self.d_t)
self.generator2 = nn.Linear(self.d_t, self.d_t)
self.generator3 = nn.Linear(self.d_t, self.d_t)
self.generator4 = nn.Linear(self.d_t, self.d_t)
self.r_drop = nn.Dropout(0.2)
self.de = nn.Linear(self.d_t, self.d_v)
self.emb_size = emb_size
self.word_rnn_size = word_rnn_size
self.word_rnn_num_layer = word_rnn_num_layer
self.word_rnn_bidirectional = word_rnn_bidirectional
self.context_rnn_size = context_rnn_size
self.context_rnn_num_layer = context_rnn_num_layer
self.context_rnn_bidirectional = context_rnn_bidirectional
self.num_label = num_label
self.embedding = nn.Embedding(num_word, emb_size)
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=
word_rnn_size, dropout=word_rnn_dropout, num_layers=
word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional
else word_rnn_size)
self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,
self.d_t, bias=False)
self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
self.context_rnn = nn.GRU(input_size=word_rnn_output_size,
hidden_size=context_rnn_size, dropout=context_rnn_dropout,
num_layers=context_rnn_num_layer, bidirectional=
context_rnn_bidirectional)
context_rnn_output_size = (context_rnn_size * 2 if
context_rnn_bidirectional else context_rnn_size)
self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,
1, bias=False)
self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,
mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.
Tanh())
if pretrained_embedding is not None:
self.embedding.weight.data = self.embedding.weight.data.new(
pretrained_embedding)
def encoder(self, x):
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
<|reserved_special_token_0|>
def generator(self, h):
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith('selector'):
yield param
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JointModel(nn.Module):
def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,
encoder_shortcut, generator_shortcut, generator_transform, num_word,
emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,
word_rnn_bidirectional, word_attention_size, context_rnn_size,
context_rnn_num_layer, context_rnn_dropout,
context_rnn_bidirectional, context_attention_size, mlp_size,
num_label, pretrained_embedding):
super(JointModel, self).__init__()
self.d_v = d_v
self.d_e = d_e
self.d_t = d_t
self.encoder_layers = encoder_layers
self.generator_layers = generator_layers
self.generator_transform = generator_transform
self.encoder_shortcut = encoder_shortcut
self.generator_shortcut = generator_shortcut
self.en1_fc = nn.Linear(self.d_v, self.d_e)
self.en2_fc = nn.Linear(self.d_e, self.d_e)
self.en_drop = nn.Dropout(0.2)
self.mean_fc = nn.Linear(self.d_e, self.d_t)
self.logvar_fc = nn.Linear(self.d_e, self.d_t)
self.generator1 = nn.Linear(self.d_t, self.d_t)
self.generator2 = nn.Linear(self.d_t, self.d_t)
self.generator3 = nn.Linear(self.d_t, self.d_t)
self.generator4 = nn.Linear(self.d_t, self.d_t)
self.r_drop = nn.Dropout(0.2)
self.de = nn.Linear(self.d_t, self.d_v)
self.emb_size = emb_size
self.word_rnn_size = word_rnn_size
self.word_rnn_num_layer = word_rnn_num_layer
self.word_rnn_bidirectional = word_rnn_bidirectional
self.context_rnn_size = context_rnn_size
self.context_rnn_num_layer = context_rnn_num_layer
self.context_rnn_bidirectional = context_rnn_bidirectional
self.num_label = num_label
self.embedding = nn.Embedding(num_word, emb_size)
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=
word_rnn_size, dropout=word_rnn_dropout, num_layers=
word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional
else word_rnn_size)
self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,
self.d_t, bias=False)
self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
self.context_rnn = nn.GRU(input_size=word_rnn_output_size,
hidden_size=context_rnn_size, dropout=context_rnn_dropout,
num_layers=context_rnn_num_layer, bidirectional=
context_rnn_bidirectional)
context_rnn_output_size = (context_rnn_size * 2 if
context_rnn_bidirectional else context_rnn_size)
self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,
1, bias=False)
self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,
mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.
Tanh())
if pretrained_embedding is not None:
self.embedding.weight.data = self.embedding.weight.data.new(
pretrained_embedding)
def encoder(self, x):
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
<|reserved_special_token_0|>
def generator(self, h):
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
<|reserved_special_token_0|>
def init_rnn_hidden(self, batch_size, level):
param_data = next(self.parameters()).data
if level == 'word':
bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
layer_size = self.word_rnn_num_layer * bidirectional_multipier
word_rnn_init_hidden = param_data.new(layer_size, batch_size,
self.word_rnn_size).zero_()
return word_rnn_init_hidden
elif level == 'context':
bidirectional_multipier = (2 if self.context_rnn_bidirectional else
1)
layer_size = self.context_rnn_num_layer * bidirectional_multipier
context_rnn_init_hidden = param_data.new(layer_size, batch_size,
self.context_rnn_size).zero_()
return context_rnn_init_hidden
else:
raise Exception("level must be 'word' or 'context'")
<|reserved_special_token_0|>
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith('selector'):
yield param
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JointModel(nn.Module):
def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,
encoder_shortcut, generator_shortcut, generator_transform, num_word,
emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,
word_rnn_bidirectional, word_attention_size, context_rnn_size,
context_rnn_num_layer, context_rnn_dropout,
context_rnn_bidirectional, context_attention_size, mlp_size,
num_label, pretrained_embedding):
super(JointModel, self).__init__()
self.d_v = d_v
self.d_e = d_e
self.d_t = d_t
self.encoder_layers = encoder_layers
self.generator_layers = generator_layers
self.generator_transform = generator_transform
self.encoder_shortcut = encoder_shortcut
self.generator_shortcut = generator_shortcut
self.en1_fc = nn.Linear(self.d_v, self.d_e)
self.en2_fc = nn.Linear(self.d_e, self.d_e)
self.en_drop = nn.Dropout(0.2)
self.mean_fc = nn.Linear(self.d_e, self.d_t)
self.logvar_fc = nn.Linear(self.d_e, self.d_t)
self.generator1 = nn.Linear(self.d_t, self.d_t)
self.generator2 = nn.Linear(self.d_t, self.d_t)
self.generator3 = nn.Linear(self.d_t, self.d_t)
self.generator4 = nn.Linear(self.d_t, self.d_t)
self.r_drop = nn.Dropout(0.2)
self.de = nn.Linear(self.d_t, self.d_v)
self.emb_size = emb_size
self.word_rnn_size = word_rnn_size
self.word_rnn_num_layer = word_rnn_num_layer
self.word_rnn_bidirectional = word_rnn_bidirectional
self.context_rnn_size = context_rnn_size
self.context_rnn_num_layer = context_rnn_num_layer
self.context_rnn_bidirectional = context_rnn_bidirectional
self.num_label = num_label
self.embedding = nn.Embedding(num_word, emb_size)
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=
word_rnn_size, dropout=word_rnn_dropout, num_layers=
word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional
else word_rnn_size)
self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,
self.d_t, bias=False)
self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
self.context_rnn = nn.GRU(input_size=word_rnn_output_size,
hidden_size=context_rnn_size, dropout=context_rnn_dropout,
num_layers=context_rnn_num_layer, bidirectional=
context_rnn_bidirectional)
context_rnn_output_size = (context_rnn_size * 2 if
context_rnn_bidirectional else context_rnn_size)
self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,
1, bias=False)
self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,
mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.
Tanh())
if pretrained_embedding is not None:
self.embedding.weight.data = self.embedding.weight.data.new(
pretrained_embedding)
def encoder(self, x):
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
def sampler(self, mean, logvar, cuda):
eps = torch.randn(mean.size()).cuda(cuda)
sigma = torch.exp(logvar)
h = sigma.mul(eps).add_(mean)
return h
def generator(self, h):
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
<|reserved_special_token_0|>
def init_rnn_hidden(self, batch_size, level):
param_data = next(self.parameters()).data
if level == 'word':
bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
layer_size = self.word_rnn_num_layer * bidirectional_multipier
word_rnn_init_hidden = param_data.new(layer_size, batch_size,
self.word_rnn_size).zero_()
return word_rnn_init_hidden
elif level == 'context':
bidirectional_multipier = (2 if self.context_rnn_bidirectional else
1)
layer_size = self.context_rnn_num_layer * bidirectional_multipier
context_rnn_init_hidden = param_data.new(layer_size, batch_size,
self.context_rnn_size).zero_()
return context_rnn_init_hidden
else:
raise Exception("level must be 'word' or 'context'")
def continuous_parameters(self):
for name, param in self.named_parameters():
if not name.startswith('selector'):
yield param
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith('selector'):
yield param
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import torch
from torch import nn
import torch.nn.functional as F
class JointModel(nn.Module):
    """Joint model: a VAE-style neural topic model plus a hierarchical
    attention network (HAN) classifier.

    The topic-model path (encoder -> sampler -> generator -> decoder)
    reconstructs a bag-of-words vector of size ``d_v`` from a ``d_t``-dim
    latent sample.  The HAN path runs a word-level GRU with attention per
    utterance, then a context-level GRU with attention over utterance
    summaries, and classifies into ``num_label`` classes.
    """

    def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,
        encoder_shortcut, generator_shortcut, generator_transform, num_word,
        emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,
        word_rnn_bidirectional, word_attention_size, context_rnn_size,
        context_rnn_num_layer, context_rnn_dropout,
        context_rnn_bidirectional, context_attention_size, mlp_size,
        num_label, pretrained_embedding):
        # d_v: vocabulary size; d_e: encoder hidden width; d_t: topic count
        # (confirmed by how the layers below are wired).
        # NOTE(review): word_attention_size and context_attention_size are
        # accepted but never used anywhere in this class.
        super(JointModel, self).__init__()
        self.d_v = d_v
        self.d_e = d_e
        self.d_t = d_t
        self.encoder_layers = encoder_layers
        self.generator_layers = generator_layers
        self.generator_transform = generator_transform
        self.encoder_shortcut = encoder_shortcut
        self.generator_shortcut = generator_shortcut
        # Variational encoder: x (d_v) -> hidden (d_e) -> mean/logvar (d_t).
        self.en1_fc = nn.Linear(self.d_v, self.d_e)
        self.en2_fc = nn.Linear(self.d_e, self.d_e)
        self.en_drop = nn.Dropout(0.2)
        self.mean_fc = nn.Linear(self.d_e, self.d_t)
        self.logvar_fc = nn.Linear(self.d_e, self.d_t)
        # Up to four optional generator layers over the latent sample;
        # how many are used is selected by generator_layers in generator().
        self.generator1 = nn.Linear(self.d_t, self.d_t)
        self.generator2 = nn.Linear(self.d_t, self.d_t)
        self.generator3 = nn.Linear(self.d_t, self.d_t)
        self.generator4 = nn.Linear(self.d_t, self.d_t)
        self.r_drop = nn.Dropout(0.2)
        # Decoder back to a vocabulary-sized reconstruction.
        self.de = nn.Linear(self.d_t, self.d_v)
        # HAN hyper-parameters kept for init_rnn_hidden().
        self.emb_size = emb_size
        self.word_rnn_size = word_rnn_size
        self.word_rnn_num_layer = word_rnn_num_layer
        self.word_rnn_bidirectional = word_rnn_bidirectional
        self.context_rnn_size = context_rnn_size
        self.context_rnn_num_layer = context_rnn_num_layer
        self.context_rnn_bidirectional = context_rnn_bidirectional
        self.num_label = num_label
        self.embedding = nn.Embedding(num_word, emb_size)
        self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=
            word_rnn_size, dropout=word_rnn_dropout, num_layers=
            word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
        word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional
            else word_rnn_size)
        # Word attention is done in two stages: project to topic space (d_t),
        # then reduce to a scalar weight per time step.
        self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,
            self.d_t, bias=False)
        self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
        self.context_rnn = nn.GRU(input_size=word_rnn_output_size,
            hidden_size=context_rnn_size, dropout=context_rnn_dropout,
            num_layers=context_rnn_num_layer, bidirectional=
            context_rnn_bidirectional)
        context_rnn_output_size = (context_rnn_size * 2 if
            context_rnn_bidirectional else context_rnn_size)
        self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,
            1, bias=False)
        self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,
            mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.
            Tanh())
        if pretrained_embedding is not None:
            # Replace the randomly-initialized embedding weights with the
            # provided pre-trained matrix (same dtype/device via .new()).
            self.embedding.weight.data = self.embedding.weight.data.new(
                pretrained_embedding)

    def encoder(self, x):
        """Map a bag-of-words batch x to variational (mean, logvar).

        Uses one or two ReLU layers depending on ``encoder_layers``;
        ``encoder_shortcut`` enables dropout on the hidden activation.
        """
        if self.encoder_layers == 1:
            pi = F.relu(self.en1_fc(x))
            if self.encoder_shortcut:
                pi = self.en_drop(pi)
        else:
            pi = F.relu(self.en1_fc(x))
            pi = F.relu(self.en2_fc(pi))
            if self.encoder_shortcut:
                pi = self.en_drop(pi)
        mean = self.mean_fc(pi)
        logvar = self.logvar_fc(pi)
        return mean, logvar

    def sampler(self, mean, logvar, cuda):
        """Reparameterization trick: h = mean + exp(logvar) * eps.

        NOTE(review): sigma = exp(logvar), not exp(logvar / 2) — i.e.
        ``logvar`` is treated as log-sigma here; confirm against the KL
        term used by the training loop.
        """
        eps = torch.randn(mean.size()).cuda(cuda)
        sigma = torch.exp(logvar)
        h = sigma.mul(eps).add_(mean)
        return h

    def generator(self, h):
        """Transform latent sample h through 0-4 linear layers.

        ``generator_shortcut`` adds a residual connection; the final
        ``generator_transform`` selects tanh/softmax/relu/identity, each
        followed by dropout.
        """
        if self.generator_layers == 0:
            r = h
        elif self.generator_layers == 1:
            temp = self.generator1(h)
            if self.generator_shortcut:
                r = F.tanh(temp) + h
            else:
                r = temp
        elif self.generator_layers == 2:
            temp = F.tanh(self.generator1(h))
            temp2 = self.generator2(temp)
            if self.generator_shortcut:
                r = F.tanh(temp2) + h
            else:
                r = temp2
        else:
            # 3+ layers: all four generator linears are applied.
            temp = F.tanh(self.generator1(h))
            temp2 = F.tanh(self.generator2(temp))
            temp3 = F.tanh(self.generator3(temp2))
            temp4 = self.generator4(temp3)
            if self.generator_shortcut:
                r = F.tanh(temp4) + h
            else:
                r = temp4
        if self.generator_transform == 'tanh':
            return self.r_drop(F.tanh(r))
        elif self.generator_transform == 'softmax':
            # NOTE(review): the [0] keeps only the first row of the softmax
            # output — looks suspicious for batched input; confirm intent.
            return self.r_drop(F.softmax(r)[0])
        elif self.generator_transform == 'relu':
            return self.r_drop(F.relu(r))
        else:
            return self.r_drop(r)

    def decoder(self, r):
        """Reconstruct the vocabulary distribution p(x|h) via softmax."""
        p_x_given_h = F.softmax(self.de(r))
        return p_x_given_h

    def init_rnn_hidden(self, batch_size, level):
        """Return a zero initial hidden state for the word- or context-RNN.

        Shape: (num_layers * num_directions, batch_size, hidden_size),
        allocated on the same device/dtype as the model parameters.
        """
        param_data = next(self.parameters()).data
        if level == 'word':
            bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
            layer_size = self.word_rnn_num_layer * bidirectional_multipier
            word_rnn_init_hidden = param_data.new(layer_size, batch_size,
                self.word_rnn_size).zero_()
            return word_rnn_init_hidden
        elif level == 'context':
            bidirectional_multipier = (2 if self.context_rnn_bidirectional else
                1)
            layer_size = self.context_rnn_num_layer * bidirectional_multipier
            context_rnn_init_hidden = param_data.new(layer_size, batch_size,
                self.context_rnn_size).zero_()
            return context_rnn_init_hidden
        else:
            raise Exception("level must be 'word' or 'context'")

    def continuous_parameters(self):
        """Yield all parameters except those of a 'selector' submodule."""
        for name, param in self.named_parameters():
            if not name.startswith('selector'):
                yield param

    def discrete_parameters(self):
        """Yield only parameters belonging to a 'selector' submodule."""
        for name, param in self.named_parameters():
            if name.startswith('selector'):
                yield param

    def forward(self, x, x_indices, input_list, length_list, cuda):
        """Run both sub-models and return
        (mean, logvar, p_x_given_h, logit, word_attention_dict).

        x: bag-of-words batch for the topic model.
        input_list: list of (seq_len, batch) word-index tensors, one per
            utterance.  NOTE(review): x_indices and length_list are
            accepted but never used in this body.
        """
        # Topic-model path.
        mean, logvar = self.encoder(x)
        h = self.sampler(mean, logvar, cuda)
        r = self.generator(h)
        p_x_given_h = self.decoder(r)
        # HAN path: word-level RNN per utterance, hidden state carried over.
        num_utterance = len(input_list)
        _, batch_size = input_list[0].size()
        word_rnn_hidden = self.init_rnn_hidden(batch_size, level='word')
        word_rnn_output_list = []
        word_attention_dict = {}
        for utterance_index in range(num_utterance):
            word_rnn_input = self.embedding(input_list[utterance_index])
            word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input,
                word_rnn_hidden)
            word_attention_weight = self.word_conv_attention_linear(
                word_rnn_output)
            batch_data = input_list[utterance_index]
            # Record a topic-space attention vector per vocabulary word;
            # repeated occurrences are merged by running pairwise averaging.
            for word_i in range(len(batch_data)):
                for clause_i in range(len(batch_data[word_i])):
                    word_index = int(batch_data[word_i, clause_i])
                    if word_index < self.d_v:
                        if word_index in word_attention_dict:
                            word_attention_dict[word_index] = (
                                word_attention_dict[word_index] +
                                word_attention_weight[word_i, clause_i, :]) / 2
                        else:
                            word_attention_dict[word_index
                                ] = word_attention_weight[word_i, clause_i, :]
            # Reduce to scalar weights over time steps, then take the
            # attention-weighted sum of the word-RNN outputs.
            word_attention_weight = self.word_conv_attention_linear2(
                word_attention_weight)
            word_attention_weight = nn.functional.relu(word_attention_weight)
            word_attention_weight = nn.functional.softmax(word_attention_weight
                , dim=0)
            word_rnn_last_output = torch.mul(word_rnn_output,
                word_attention_weight).sum(dim=0)
            word_rnn_output_list.append(word_rnn_last_output)
            # Detach so gradients do not flow across utterance boundaries.
            word_rnn_hidden = word_rnn_hidden.detach()
        # Context-level RNN with attention over the utterance summaries.
        context_rnn_hidden = self.init_rnn_hidden(batch_size, level='context')
        context_rnn_input = torch.stack(word_rnn_output_list, dim=0)
        context_rnn_output, context_rnn_hidden = self.context_rnn(
            context_rnn_input, context_rnn_hidden)
        context_attention_weight = self.context_conv_attention_linear(
            context_rnn_output)
        context_attention_weight = nn.functional.relu(context_attention_weight)
        context_attention_weight = nn.functional.softmax(
            context_attention_weight, dim=0)
        context_rnn_last_output = torch.mul(context_rnn_output,
            context_attention_weight).sum(dim=0)
        classifier_input = context_rnn_last_output
        logit = self.classifier(classifier_input)
        return mean, logvar, p_x_given_h, logit, word_attention_dict
<|reserved_special_token_1|>
import torch
from torch import nn
import torch.nn.functional as F
class JointModel(nn.Module):
    def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,
                 num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,
                 context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,
                 num_label, pretrained_embedding):
        """Build the joint topic-model (NGTM) + hierarchical attention (HAN) network.

        NOTE(review): word_attention_size and context_attention_size are
        accepted but not used by any layer constructed here.
        """
        super(JointModel, self).__init__()
        ##NGTM:
        self.d_v = d_v # vocabulary size
        self.d_e = d_e # dimensionality of encoder
        self.d_t = d_t # number of topics
        self.encoder_layers = encoder_layers
        self.generator_layers = generator_layers
        self.generator_transform = generator_transform # transform to apply after the generator
        self.encoder_shortcut = encoder_shortcut
        self.generator_shortcut = generator_shortcut
        # Variational encoder: x (d_v) -> hidden (d_e) -> mean/logvar (d_t).
        self.en1_fc = nn.Linear(self.d_v, self.d_e)
        self.en2_fc = nn.Linear(self.d_e, self.d_e)
        self.en_drop = nn.Dropout(0.2)
        self.mean_fc = nn.Linear(self.d_e, self.d_t)
        # self.mean_bn = nn.BatchNorm1d(self.d_t)
        self.logvar_fc = nn.Linear(self.d_e, self.d_t)
        # self.logvar_bn = nn.BatchNorm1d(self.d_t)
        # Up to four optional generator layers; generator() picks how many.
        self.generator1 = nn.Linear(self.d_t, self.d_t)
        self.generator2 = nn.Linear(self.d_t, self.d_t)
        self.generator3 = nn.Linear(self.d_t, self.d_t)
        self.generator4 = nn.Linear(self.d_t, self.d_t)
        self.r_drop = nn.Dropout(0.2)
        self.de = nn.Linear(self.d_t, self.d_v)
        # self.de_bn = nn.BatchNorm1d(self.d_v)
        ##HAN:
        self.emb_size = emb_size
        self.word_rnn_size = word_rnn_size
        self.word_rnn_num_layer = word_rnn_num_layer
        self.word_rnn_bidirectional = word_rnn_bidirectional
        self.context_rnn_size = context_rnn_size
        self.context_rnn_num_layer = context_rnn_num_layer
        self.context_rnn_bidirectional = context_rnn_bidirectional
        self.num_label = num_label
        self.embedding = nn.Embedding(num_word, emb_size)
        self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,
                               num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
        word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size
        # Two-stage word attention: project to topic space, then to a scalar.
        self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)
        self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
        self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,
                                  num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)
        context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size
        self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)
        self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),
                                        nn.LeakyReLU(),
                                        nn.Linear(mlp_size, num_label),
                                        nn.Tanh())
        if pretrained_embedding is not None:
            # Copy pre-trained vectors into the embedding weights
            # (same dtype/device via .new()).
            self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)
    def encoder(self, x):
        """Map a bag-of-words batch x to variational (mean, logvar).

        One or two ReLU layers depending on ``encoder_layers``;
        ``encoder_shortcut`` enables dropout on the hidden activation.
        """
        if self.encoder_layers == 1:
            pi = F.relu(self.en1_fc(x))
            if self.encoder_shortcut:
                pi = self.en_drop(pi)
        else:
            pi = F.relu(self.en1_fc(x))
            pi = F.relu(self.en2_fc(pi))
            if self.encoder_shortcut:
                pi = self.en_drop(pi)
        # mean = self.mean_bn(self.mean_fc(pi))
        # logvar = self.logvar_bn(self.logvar_fc(pi))
        mean = self.mean_fc(pi)
        logvar = self.logvar_fc(pi)
        return mean, logvar
    def sampler(self, mean, logvar, cuda):
        """Reparameterization trick: h = mean + exp(logvar) * eps, eps ~ N(0, 1).

        ``cuda`` is the device index for the noise tensor.
        NOTE(review): sigma = exp(logvar), not exp(logvar / 2) — ``logvar``
        is effectively treated as log-sigma; confirm against the KL term.
        """
        eps = torch.randn(mean.size()).cuda(cuda)
        sigma = torch.exp(logvar)
        h = sigma.mul(eps).add_(mean)
        return h
    def generator(self, h):
        """Transform latent sample h through 0-4 linear layers.

        ``generator_shortcut`` adds a residual connection after the last
        layer; ``generator_transform`` selects the final nonlinearity
        (tanh / softmax / relu / identity), each followed by dropout.
        """
        # temp = self.generator1(h)
        # if self.generator_shortcut:
        #     r = F.tanh(temp) + h
        # else:
        #     r = temp
        if self.generator_layers == 0:
            r = h
        elif self.generator_layers == 1:
            temp = self.generator1(h)
            if self.generator_shortcut:
                r = F.tanh(temp) + h
            else:
                r = temp
        elif self.generator_layers == 2:
            temp = F.tanh(self.generator1(h))
            temp2 = self.generator2(temp)
            if self.generator_shortcut:
                r = F.tanh(temp2) + h
            else:
                r = temp2
        else:
            # 3 or more requested layers: all four generator linears apply.
            temp = F.tanh(self.generator1(h))
            temp2 = F.tanh(self.generator2(temp))
            temp3 = F.tanh(self.generator3(temp2))
            temp4 = self.generator4(temp3)
            if self.generator_shortcut:
                r = F.tanh(temp4) + h
            else:
                r = temp4
        if self.generator_transform == 'tanh':
            return self.r_drop(F.tanh(r))
        elif self.generator_transform == 'softmax':
            # NOTE(review): the [0] keeps only the first row of the softmax
            # output — looks suspicious for batched input; confirm intent.
            return self.r_drop(F.softmax(r)[0])
        elif self.generator_transform == 'relu':
            return self.r_drop(F.relu(r))
        else:
            return self.r_drop(r)
def decoder(self, r):
# p_x_given_h = F.softmax(self.de_bn(self.de(r)))
p_x_given_h = F.softmax(self.de(r))
return p_x_given_h
    def init_rnn_hidden(self, batch_size, level):
        """Return a zero initial hidden state for the word- or context-RNN.

        Shape: (num_layers * num_directions, batch_size, hidden_size),
        allocated with the same dtype/device as the model parameters.
        Raises if ``level`` is neither 'word' nor 'context'.
        """
        param_data = next(self.parameters()).data
        if level == "word":
            bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
            layer_size = self.word_rnn_num_layer * bidirectional_multipier
            word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()
            return word_rnn_init_hidden
        elif level == "context":
            bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1
            layer_size = self.context_rnn_num_layer * bidirectional_multipier
            context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()
            return context_rnn_init_hidden
        else:
            raise Exception("level must be 'word' or 'context'")
def continuous_parameters(self):
for name, param in self.named_parameters():
if not name.startswith("selector"):
yield param
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith("selector"):
yield param
def forward(self, x, x_indices, input_list, length_list, cuda):
    """Joint forward pass: VAE topic model plus hierarchical attention network.

    Args:
        x: bag-of-words document input for the topic-model encoder.
        x_indices: unused in this method — presumably word indices for
            ``x``; TODO confirm against callers.
        input_list: one LongTensor of word ids per utterance, each shaped
            (num_words, batch_size) — inferred from the ``.size()`` unpack.
        length_list: unused in this method.
        cuda: device index forwarded to ``sampler`` for the noise tensor.

    Returns:
        (mean, logvar, p_x_given_h, logit, word_attention_dict): topic
        posterior parameters, reconstructed word distribution, classifier
        logits, and per-word-id averaged attention vectors.
    """
    ### topic model: encode -> reparameterize -> generate -> decode
    mean, logvar = self.encoder(x)  # batchsize*50
    h = self.sampler(mean, logvar, cuda)  # batchsize*50
    r = self.generator(h)  # batchsize*50
    p_x_given_h = self.decoder(r)  # batchsize*dv
    ### HAN
    num_utterance = len(input_list)  # number of utterances in this batch of documents
    _, batch_size = input_list[0].size()
    # word-level rnn
    word_rnn_hidden = self.init_rnn_hidden(batch_size, level="word")
    word_rnn_output_list = []
    word_attention_dict = {}
    # de_weight = torch.zeros(self.d_v, self.d_t).cuda()
    # de_weight.copy_(self.de.weight.data)
    for utterance_index in range(num_utterance):
        word_rnn_input = self.embedding(input_list[utterance_index])
        word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)
        # d_t-dimensional (topic-space) attention per word, before the
        # scalar scoring below.
        word_attention_weight = self.word_conv_attention_linear(word_rnn_output)
        # word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())
        batch_data = input_list[utterance_index]
        # Accumulate an attention vector per vocabulary id (only ids < d_v).
        for word_i in range(len(batch_data)):  # word position within the utterance
            for clause_i in range(len(batch_data[word_i])):  # position within the batch
                word_index = int(batch_data[word_i, clause_i])  # vocabulary id
                if word_index < self.d_v:
                    if word_index in word_attention_dict:
                        # NOTE(review): repeated halving weights recent
                        # occurrences more heavily than a true mean.
                        word_attention_dict[word_index] = (word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2
                    else:
                        word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]
        ## HAN: reduce topic-space attention to one scalar weight per word
        word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)
        word_attention_weight = nn.functional.relu(word_attention_weight)
        word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)
        word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)
        word_rnn_output_list.append(word_rnn_last_output)
        # Detach so gradients do not flow across utterance boundaries.
        word_rnn_hidden = word_rnn_hidden.detach()
    # context-level rnn over the attended utterance vectors
    context_rnn_hidden = self.init_rnn_hidden(batch_size, level="context")
    context_rnn_input = torch.stack(word_rnn_output_list, dim=0)
    context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)
    context_attention_weight = self.context_conv_attention_linear(context_rnn_output)
    context_attention_weight = nn.functional.relu(context_attention_weight)
    context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)
    context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)
    classifier_input = context_rnn_last_output
    logit = self.classifier(classifier_input)
    return mean, logvar, p_x_given_h, logit, word_attention_dict
|
flexible
|
{
"blob_id": "4f3e297b6925f8d65aacaa59bb837e746747c33f",
"index": 2608,
"step-1": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = 
nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n 
else:\n return self.r_drop(r)\n <mask token>\n <mask token>\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = 
nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n 
else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = 
nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif 
self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-4": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else 
word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n 
return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n mean, logvar = self.encoder(x)\n h = self.sampler(mean, logvar, cuda)\n r = self.generator(h)\n p_x_given_h = self.decoder(r)\n num_utterance = len(input_list)\n _, batch_size = input_list[0].size()\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level='word')\n word_rnn_output_list = []\n word_attention_dict = {}\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input,\n word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(\n word_rnn_output)\n batch_data = 
input_list[utterance_index]\n for word_i in range(len(batch_data)):\n for clause_i in range(len(batch_data[word_i])):\n word_index = int(batch_data[word_i, clause_i])\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = (\n word_attention_dict[word_index] +\n word_attention_weight[word_i, clause_i, :]) / 2\n else:\n word_attention_dict[word_index\n ] = word_attention_weight[word_i, clause_i, :]\n word_attention_weight = self.word_conv_attention_linear2(\n word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight\n , dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output,\n word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level='context')\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(\n context_rnn_input, context_rnn_hidden)\n context_attention_weight = self.context_conv_attention_linear(\n context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(\n context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output,\n context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n return mean, logvar, p_x_given_h, logit, word_attention_dict\n",
"step-5": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass JointModel(nn.Module):\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,\n num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,\n context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n\n super(JointModel, self).__init__()\n\n ##NGTM:\n self.d_v = d_v # vocabulary size\n self.d_e = d_e # dimensionality of encoder\n self.d_t = d_t # number of topics\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform # transform to apply after the generator\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n # self.mean_bn = nn.BatchNorm1d(self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n # self.logvar_bn = nn.BatchNorm1d(self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n # self.de_bn = nn.BatchNorm1d(self.d_v)\n\n ##HAN:\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n 
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,\n num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)\n context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),\n nn.LeakyReLU(),\n nn.Linear(mlp_size, num_label),\n nn.Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)\n\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n\n # mean = self.mean_bn(self.mean_fc(pi))\n # logvar = self.logvar_bn(self.logvar_fc(pi))\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n# temp = self.generator1(h)\n# if self.generator_shortcut:\n# r = F.tanh(temp) + h\n# else:\n# r = temp\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers 
== 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n # p_x_given_h = F.softmax(self.de_bn(self.de(r)))\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == \"word\":\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == \"context\":\n bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith(\"selector\"):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith(\"selector\"):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n ###topic model\n mean, logvar = self.encoder(x) # batchsize*50\n h = self.sampler(mean, logvar, cuda) # batchsize*50\n r = self.generator(h) 
# batchsize*50\n p_x_given_h = self.decoder(r) # batchsize*dv\n ###HAN\n num_utterance = len(input_list) # one batch doucument_list\n _, batch_size = input_list[0].size()\n # word-level rnn\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"word\")\n word_rnn_output_list = []\n word_attention_dict = {}\n # de_weight = torch.zeros(self.d_v, self.d_t).cuda()\n # de_weight.copy_(self.de.weight.data)\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(word_rnn_output)\n\n # word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())\n batch_data = input_list[utterance_index]\n for word_i in range(len(batch_data)): # word_i word\n for clause_i in range(len(batch_data[word_i])): # clause_i data(batch)\n word_index = int(batch_data[word_i, clause_i]) # word index\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = (word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2\n else:\n word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]\n\n ##HAN\n word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n # context-level rnn\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"context\")\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)\n context_attention_weight = 
self.context_conv_attention_linear(context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n\n return mean, logvar, p_x_given_h, logit, word_attention_dict",
"step-ids": [
5,
6,
8,
11,
12
]
}
|
[
5,
6,
8,
11,
12
] |
from tkinter import *
from tkinter import messagebox as mb
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog as fd
from child_window import ChildWindow
# from PIL import Image as PilImage
# from PIL import ImageTk, ImageOps
class Window:
    """Main application window: a scrolled text area under a File/Help menu bar."""

    def __init__(self, width, height, title="MyWindow", resizable=(False, False), icon=r"resources/feather.ico"):
        # NOTE(review): width/height/resizable are currently unused — the window is
        # only positioned (+600+300), never sized; parameters kept for compatibility.
        self.root = Tk()
        self.root.title(title)
        self.root.geometry("+600+300")
        if icon:
            self.root.iconbitmap(icon)  # may raise on non-Windows platforms — TODO confirm
        self.text = ScrolledText(self.root)

    def run(self):
        """Build the widgets and enter the Tk main loop (blocks until exit)."""
        self.draw_widgets()
        self.root.mainloop()

    def draw_widgets(self):
        """Lay out the menu bar and the scrolled text area."""
        self.draw_menu()
        self.text.pack()

    def draw_menu(self):
        """Create the file and info menus and attach them to the root window."""
        menu_bar = Menu(self.root)

        file_menu = Menu(menu_bar, tearoff=0)
        file_menu.add_command(label="Открыть", command=self.open_file)
        file_menu.add_command(label="Сохранить как", command=self.save_file)
        # Fixed user-facing typo: was "Отркыть папку".
        file_menu.add_command(label="Открыть папку", command=self.open_dir)
        file_menu.add_separator()
        file_menu.add_command(label="Выйти", command=self.exit)

        info_menu = Menu(menu_bar, tearoff=0)
        info_menu.add_command(label="О приложении", command=self.show_info)

        menu_bar.add_cascade(label="Файл", menu=file_menu)
        menu_bar.add_cascade(label="Справка", menu=info_menu)
        self.root.configure(menu=menu_bar)

    def open_file(self):
        """Ask for one or more files and echo the chosen paths into the text area."""
        file_names = fd.askopenfilenames()
        self.text.insert(END, str(file_names))

    def save_file(self):
        """Ask for a save path; only logs the choice — nothing is written yet."""
        name = fd.asksaveasfilename(filetypes=(("TEXT files", "*.txt"), ("Py files", "*.py")))
        if name:
            self.text.insert(END, f"Сохранить файл по пути {name}\n")

    def open_dir(self):
        """Ask for an existing directory and log the choice."""
        path = fd.askdirectory(mustexist=True)
        self.text.insert(END, f"Папка {path}\n")

    def show_info(self):
        """Show the about box."""
        mb.showinfo("Информация", "Лучшее графическое приложение на свете")

    def exit(self):
        """Ask for confirmation, then destroy the root window."""
        choice = mb.askyesno("Quit", "Do you want to quit?")
        if choice:
            self.root.destroy()

    def create_child(self, width, height, title="Child", resizable=(False, False), icon=None):
        """Open a ChildWindow owned by this window's root."""
        ChildWindow(self.root, width, height, title, resizable, icon)
# Script entry point: build the main window and start the Tk event loop.
if __name__ == "__main__":
    window = Window(500, 500, "TKINTER")
    # window.create_child(200, 100)
    window.run()
|
normal
|
{
"blob_id": "02d4e1ddb0b4cf75c9902e13263c5a80417de01b",
"index": 6530,
"step-1": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n <mask token>\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n <mask token>\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n <mask token>\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Window:\n\n def __init__(self, width, height, title='MyWindow', resizable=(False, \n False), icon='resources/feather.ico'):\n self.root = Tk()\n self.root.title(title)\n self.root.geometry('+600+300')\n if icon:\n self.root.iconbitmap(icon)\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label='Открыть', command=self.open_file)\n file_menu.add_command(label='Сохранить как', command=self.save_file)\n file_menu.add_command(label='Отркыть папку', command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label='Выйти', command=self.exit)\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label='О приложении', command=self.show_info)\n menu_bar.add_cascade(label='Файл', menu=file_menu)\n menu_bar.add_cascade(label='Справка', menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=(('TEXT files', '*.txt'), (\n 'Py files', '*.py')))\n if name:\n self.text.insert(END, f'Сохранить файл по пути {name}\\n')\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f'Папка {path}\\n')\n\n def show_info(self):\n mb.showinfo('Информация', 'Лучшее графическое приложение на свете')\n\n def exit(self):\n choice = mb.askyesno('Quit', 'Do you want to quit?')\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title='Child', resizable=(False, \n False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == '__main__':\n window = Window(500, 500, 'TKINTER')\n window.run()\n",
"step-5": "from tkinter import *\nfrom tkinter import messagebox as mb\nfrom tkinter.scrolledtext import ScrolledText\nfrom tkinter import filedialog as fd\nfrom child_window import ChildWindow\n# from PIL import Image as PilImage\n# from PIL import ImageTk, ImageOps\n\n\nclass Window:\n def __init__(self, width, height, title=\"MyWindow\", resizable=(False, False), icon=r\"resources/feather.ico\"):\n self.root = Tk()\n self.root.title(title)\n # self.root.geometry(f\"{width}x{height}+200+200\")\n self.root.geometry(\"+600+300\")\n # self.root.resizable(resizable[0], resizable[1])\n if icon:\n self.root.iconbitmap(icon)\n\n self.text = ScrolledText(self.root)\n\n def run(self):\n self.draw_widgets()\n self.root.mainloop()\n\n def draw_widgets(self):\n self.draw_menu()\n self.text.pack()\n\n def draw_menu(self):\n menu_bar = Menu(self.root)\n\n file_menu = Menu(menu_bar, tearoff=0)\n file_menu.add_command(label=\"Открыть\", command=self.open_file)\n file_menu.add_command(label=\"Сохранить как\", command=self.save_file)\n file_menu.add_command(label=\"Отркыть папку\", command=self.open_dir)\n file_menu.add_separator()\n file_menu.add_command(label=\"Выйти\", command=self.exit)\n\n info_menu = Menu(menu_bar, tearoff=0)\n info_menu.add_command(label=\"О приложении\", command=self.show_info)\n\n menu_bar.add_cascade(label=\"Файл\", menu=file_menu)\n menu_bar.add_cascade(label=\"Справка\", menu=info_menu)\n self.root.configure(menu=menu_bar)\n\n def open_file(self):\n # wanted_files = (\n # (\"IMAGES\", \"*.jpeg;*.png;*.gif\"),\n # (\"TEXT files\", \"*.txt;*.log\"),\n # (\"PY files\", \"*.py\"),\n # (\"ALL\", \"*.*\")\n # )\n #\n # file_name = fd.askopenfilename(initialdir=\"D:/\", title=\"FIND A FILE\", filetypes=wanted_files)\n # self.text.insert(END, f\"Надо открыть файл: {file_name}\\nСодержимое:\\n\")\n # if file_name:\n # with open(file_name, \"r\") as f:\n # self.text.insert(END, f.read())\n\n # file = fd.askopenfile()\n # self.text.insert(END, file.read())\n # 
file.close()\n\n file_names = fd.askopenfilenames()\n self.text.insert(END, str(file_names))\n\n def save_file(self):\n name = fd.asksaveasfilename(filetypes=((\"TEXT files\", \"*.txt\"), (\"Py files\", \"*.py\")))\n if name:\n self.text.insert(END, f\"Сохранить файл по пути {name}\\n\")\n # with open(name, \"w\") as f:\n # f.write(\"123\")\n\n # file = fd.asksaveasfile()\n # file.write(\"123\")\n # file.close()\n\n def open_dir(self):\n path = fd.askdirectory(mustexist=True)\n self.text.insert(END, f\"Папка {path}\\n\")\n\n def show_info(self):\n mb.showinfo(\"Информация\", \"Лучшее графическое приложение на свете\")\n\n def exit(self):\n choice = mb.askyesno(\"Quit\", \"Do you want to quit?\")\n if choice:\n self.root.destroy()\n\n def create_child(self, width, height, title=\"Child\", resizable=(False, False), icon=None):\n ChildWindow(self.root, width, height, title, resizable, icon)\n\n\nif __name__ == \"__main__\":\n window = Window(500, 500, \"TKINTER\")\n # window.create_child(200, 100)\n window.run()\n\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
<|reserved_special_token_0|>
def standev(vals):
mean = avg(vals)
var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)
return sqrt(var)
<|reserved_special_token_0|>
def read_csv(file_name):
data = list()
with open(file_name, 'r') as file:
csv = reader(file)
for row in csv:
if not row:
continue
data.append(row)
return data
<|reserved_special_token_0|>
def split_class(data):
data_by_class = dict()
for i in range(len(data)):
instance = data[i]
class_val = instance[-1]
if class_val not in data_by_class:
data_by_class[class_val] = list()
data_by_class[class_val].append(instance)
return data_by_class
<|reserved_special_token_0|>
def data_stats(data):
stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]
del stats[-1]
return stats
<|reserved_special_token_0|>
def cross_validation_split(data, n_folds):
data_split = list()
copy = list(data)
fold_size = int(len(data) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(copy))
fold.append(copy.pop(index))
data_split.append(fold)
return data_split
def evaluate(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
folds = cross_validation_split(data, n_folds)
accuracy_list = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
copy = list(row)
test_set.append(copy)
copy[-1] = None
predicted = algo(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = evaluate(actual, predicted)
accuracy_list.append(accuracy)
return accuracy_list
def naive_bayes(train, test):
stats = class_stats(train)
preds = list()
for row in test:
result = predict(stats, row)
preds.append(result)
return preds
def run(file_name, target):
seed(1)
data = read_csv(file_name)
data = move_class_to_last_col(data, target)
for i in range(len(data[0]) - 1):
str_column_to_float(data, i)
int_from_string_col(data, len(data[0]) - 1)
n_folds = 10
accuracies = cross_validation(data, naive_bayes, n_folds)
print('10-fold Cross-Validation Accuracy Scores')
for score in accuracies:
print('%.4f%%' % score)
print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def avg(vals):
return sum(vals) / float(len(vals))
def standev(vals):
mean = avg(vals)
var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)
return sqrt(var)
<|reserved_special_token_0|>
def read_csv(file_name):
data = list()
with open(file_name, 'r') as file:
csv = reader(file)
for row in csv:
if not row:
continue
data.append(row)
return data
<|reserved_special_token_0|>
def split_class(data):
data_by_class = dict()
for i in range(len(data)):
instance = data[i]
class_val = instance[-1]
if class_val not in data_by_class:
data_by_class[class_val] = list()
data_by_class[class_val].append(instance)
return data_by_class
<|reserved_special_token_0|>
def data_stats(data):
stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]
del stats[-1]
return stats
<|reserved_special_token_0|>
def class_get_prob(stats, instance):
num_rows = sum([stats[label][0][2] for label in stats])
prob_vals = dict()
for class_val, class_stats in stats.items():
prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)
for i in range(len(class_stats)):
avg, standev, size = class_stats[i]
prob_vals[class_val] *= probability(instance[i], avg, standev)
return prob_vals
<|reserved_special_token_0|>
def cross_validation_split(data, n_folds):
data_split = list()
copy = list(data)
fold_size = int(len(data) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(copy))
fold.append(copy.pop(index))
data_split.append(fold)
return data_split
def evaluate(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
folds = cross_validation_split(data, n_folds)
accuracy_list = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
copy = list(row)
test_set.append(copy)
copy[-1] = None
predicted = algo(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = evaluate(actual, predicted)
accuracy_list.append(accuracy)
return accuracy_list
def naive_bayes(train, test):
stats = class_stats(train)
preds = list()
for row in test:
result = predict(stats, row)
preds.append(result)
return preds
def run(file_name, target):
seed(1)
data = read_csv(file_name)
data = move_class_to_last_col(data, target)
for i in range(len(data[0]) - 1):
str_column_to_float(data, i)
int_from_string_col(data, len(data[0]) - 1)
n_folds = 10
accuracies = cross_validation(data, naive_bayes, n_folds)
print('10-fold Cross-Validation Accuracy Scores')
for score in accuracies:
print('%.4f%%' % score)
print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def avg(vals):
return sum(vals) / float(len(vals))
def standev(vals):
mean = avg(vals)
var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)
return sqrt(var)
<|reserved_special_token_0|>
def read_csv(file_name):
data = list()
with open(file_name, 'r') as file:
csv = reader(file)
for row in csv:
if not row:
continue
data.append(row)
return data
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
<|reserved_special_token_0|>
def split_class(data):
data_by_class = dict()
for i in range(len(data)):
instance = data[i]
class_val = instance[-1]
if class_val not in data_by_class:
data_by_class[class_val] = list()
data_by_class[class_val].append(instance)
return data_by_class
<|reserved_special_token_0|>
def data_stats(data):
stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]
del stats[-1]
return stats
<|reserved_special_token_0|>
def class_get_prob(stats, instance):
num_rows = sum([stats[label][0][2] for label in stats])
prob_vals = dict()
for class_val, class_stats in stats.items():
prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)
for i in range(len(class_stats)):
avg, standev, size = class_stats[i]
prob_vals[class_val] *= probability(instance[i], avg, standev)
return prob_vals
<|reserved_special_token_0|>
def cross_validation_split(data, n_folds):
data_split = list()
copy = list(data)
fold_size = int(len(data) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(copy))
fold.append(copy.pop(index))
data_split.append(fold)
return data_split
def evaluate(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
folds = cross_validation_split(data, n_folds)
accuracy_list = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
copy = list(row)
test_set.append(copy)
copy[-1] = None
predicted = algo(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = evaluate(actual, predicted)
accuracy_list.append(accuracy)
return accuracy_list
def naive_bayes(train, test):
stats = class_stats(train)
preds = list()
for row in test:
result = predict(stats, row)
preds.append(result)
return preds
def run(file_name, target):
seed(1)
data = read_csv(file_name)
data = move_class_to_last_col(data, target)
for i in range(len(data[0]) - 1):
str_column_to_float(data, i)
int_from_string_col(data, len(data[0]) - 1)
n_folds = 10
accuracies = cross_validation(data, naive_bayes, n_folds)
print('10-fold Cross-Validation Accuracy Scores')
for score in accuracies:
print('%.4f%%' % score)
print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))
<|reserved_special_token_1|>
from math import sqrt, pi, exp
from csv import reader
from random import seed, randrange
<|reserved_special_token_0|>
def probability(x, avg, standev):
exponent = exp(-((x - avg) ** 2 / (2 * standev ** 2)))
return 1 / (sqrt(2 * pi) * standev) * exponent
def avg(vals):
return sum(vals) / float(len(vals))
def standev(vals):
mean = avg(vals)
var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)
return sqrt(var)
<|reserved_special_token_0|>
def read_csv(file_name):
data = list()
with open(file_name, 'r') as file:
csv = reader(file)
for row in csv:
if not row:
continue
data.append(row)
return data
def str_column_to_float(dataset, column):
for row in dataset:
row[column] = float(row[column].strip())
def int_from_string_col(data, col):
class_val = [row[col] for row in data]
unique_set = set(class_val)
lookup = dict()
for i, val in enumerate(unique_set):
lookup[val] = i
for row in data:
row[col] = lookup[row[col]]
return lookup
def move_class_to_last_col(data, col):
for row in data:
temp = row[col]
del row[col]
row.append(temp)
return data
<|reserved_special_token_0|>
def split_class(data):
data_by_class = dict()
for i in range(len(data)):
instance = data[i]
class_val = instance[-1]
if class_val not in data_by_class:
data_by_class[class_val] = list()
data_by_class[class_val].append(instance)
return data_by_class
<|reserved_special_token_0|>
def data_stats(data):
stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]
del stats[-1]
return stats
def class_stats(data):
split = split_class(data)
class_stats = dict()
for class_val, row in split.items():
class_stats[class_val] = data_stats(row)
return class_stats
<|reserved_special_token_0|>
def class_get_prob(stats, instance):
num_rows = sum([stats[label][0][2] for label in stats])
prob_vals = dict()
for class_val, class_stats in stats.items():
prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)
for i in range(len(class_stats)):
avg, standev, size = class_stats[i]
prob_vals[class_val] *= probability(instance[i], avg, standev)
return prob_vals
def predict(stats, instance):
prob_vals = class_get_prob(stats, instance)
top_prob, top_label = -1, None
for class_val, prob in prob_vals.items():
if top_label is None or prob > top_prob:
top_prob = prob
top_label = class_val
return top_label
def cross_validation_split(data, n_folds):
data_split = list()
copy = list(data)
fold_size = int(len(data) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(copy))
fold.append(copy.pop(index))
data_split.append(fold)
return data_split
def evaluate(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct += 1
return correct / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
folds = cross_validation_split(data, n_folds)
accuracy_list = list()
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = list()
for row in fold:
copy = list(row)
test_set.append(copy)
copy[-1] = None
predicted = algo(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = evaluate(actual, predicted)
accuracy_list.append(accuracy)
return accuracy_list
def naive_bayes(train, test):
stats = class_stats(train)
preds = list()
for row in test:
result = predict(stats, row)
preds.append(result)
return preds
def run(file_name, target):
seed(1)
data = read_csv(file_name)
data = move_class_to_last_col(data, target)
for i in range(len(data[0]) - 1):
str_column_to_float(data, i)
int_from_string_col(data, len(data[0]) - 1)
n_folds = 10
accuracies = cross_validation(data, naive_bayes, n_folds)
print('10-fold Cross-Validation Accuracy Scores')
for score in accuracies:
print('%.4f%%' % score)
print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#imports
from math import sqrt, pi, exp
from csv import reader
from random import seed,randrange
"""
Helper functions
"""
#calculate probability
def probability(x, avg, standev):
    """Gaussian probability density of *x* given mean *avg* and stdev *standev*."""
    coefficient = 1 / (sqrt(2 * pi) * standev)
    return coefficient * exp(-((x - avg) ** 2 / (2 * standev ** 2)))
#mean
def avg(vals):
    """Arithmetic mean of *vals* (must be non-empty)."""
    total = sum(vals)
    return total / float(len(vals))
#standard deviation
def standev(vals):
    """Sample (Bessel-corrected) standard deviation of *vals*; needs len >= 2."""
    mean = sum(vals) / float(len(vals))
    squared_diffs = [(v - mean) ** 2 for v in vals]
    return sqrt(sum(squared_diffs) / float(len(vals) - 1))
"""
Data Handling
"""
def read_csv(file_name):
    """Load a CSV file into a list of row-lists, skipping blank lines."""
    with open(file_name, 'r') as handle:
        return [row for row in reader(handle) if row]
# Convert string column to float
def str_column_to_float(dataset, column):
    """Convert column *column* of every row to float in place (whitespace stripped)."""
    for record in dataset:
        record[column] = float(record[column].strip())
def int_from_string_col(data, col):
    """Encode the string values of column *col* as integers, in place.

    Each distinct string gets an index 0..k-1; the value->index mapping is
    returned. Distinct values are sorted first so the encoding is reproducible
    across runs — plain ``set`` iteration order varies with Python's
    string-hash randomization, which made the original mapping
    non-deterministic between interpreter invocations.
    """
    unique_vals = sorted(set(row[col] for row in data))
    lookup = {val: i for i, val in enumerate(unique_vals)}
    for row in data:
        row[col] = lookup[row[col]]
    return lookup
def move_class_to_last_col(data, col):
    """Move column *col* of every row to the row's end, in place; returns *data*."""
    for row in data:
        row.append(row.pop(col))
    return data
"""
Implementation Functions
"""
"""
We need to calculate the probability of data according to their class so the
training data needs to be split up by classes. In order to do this we need to
establish the column that represents the class value for each dataset.
"""
# this works for datasets with last column representing class value
def split_class(data):
    """Group rows by class label (assumed to be each row's last element).

    Returns a dict mapping class value -> list of rows with that class.
    Uses direct iteration and ``setdefault`` instead of the original
    index loop plus membership-check-then-create.
    """
    data_by_class = dict()
    for instance in data:
        data_by_class.setdefault(instance[-1], []).append(instance)
    return data_by_class
"""
We need to find the mean and standard deviation for each column of input.
"""
def data_stats(data):
    """Per-column (mean, stdev, count) tuples for *data*, excluding the class column."""
    stats = [(avg(column), standev(column), len(column)) for column in zip(*data)]
    stats.pop()  # the last column is the class label — no stats needed for it
    return stats
def class_stats(data):
    """Compute per-attribute stats separately for each class present in *data*."""
    return {class_val: data_stats(rows)
            for class_val, rows in split_class(data).items()}
"""
Calculate Class Probabilities
"""
def class_get_prob(stats, instance):
    """Return {class: P(class) * prod_i P(attr_i | class)} for *instance*.

    *stats* maps class value -> list of (mean, stdev, count) per attribute;
    the count stored with the first attribute doubles as that class's row count.
    Locals are renamed: the original loop bound ``class_stats``, ``avg`` and
    ``standev``, shadowing the module-level functions of the same names.
    """
    num_rows = sum(stats[label][0][2] for label in stats)
    prob_vals = dict()
    for class_val, attr_stats in stats.items():
        # Prior: fraction of training rows belonging to this class.
        prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)
        for i in range(len(attr_stats)):
            mean, stdev, _size = attr_stats[i]
            prob_vals[class_val] *= probability(instance[i], mean, stdev)
    return prob_vals
def predict(stats, instance):
    """Return the class label with the highest naive-Bayes score for *instance*."""
    prob_vals = class_get_prob(stats, instance)
    best_label, best_prob = None, -1
    for label, prob in prob_vals.items():
        if best_label is None or prob > best_prob:
            best_label, best_prob = label, prob
    return best_label
def cross_validation_split(data, n_folds):
    """Randomly partition *data* into *n_folds* equal-size folds.

    Rows are drawn without replacement; any remainder (len(data) % n_folds)
    is silently dropped. Uses the ``random`` module, so results depend on
    the current seed.
    """
    pool = list(data)
    fold_size = len(data) // n_folds
    folds = []
    for _ in range(n_folds):
        folds.append([pool.pop(randrange(len(pool))) for _ in range(fold_size)])
    return folds
def evaluate(actual, predicted):
    """Classification accuracy of *predicted* against *actual*, as a percentage."""
    matches = sum(1 for a, p in zip(actual, predicted) if a == p)
    return matches / float(len(actual)) * 100.0
def cross_validation(data, algo, n_folds, *args):
    """Score *algo* with k-fold cross-validation; returns per-fold accuracies.

    For each fold, the remaining folds are concatenated into the training
    set and the held-out fold's class values are blanked (set to None)
    before prediction, so the algorithm cannot peek at the answers.
    """
    folds = cross_validation_split(data, n_folds)
    scores = []
    for held_out in folds:
        training_folds = list(folds)
        training_folds.remove(held_out)
        train_set = sum(training_folds, [])
        test_set = []
        for row in held_out:
            row_copy = list(row)
            row_copy[-1] = None
            test_set.append(row_copy)
        predicted = algo(train_set, test_set, *args)
        actual = [row[-1] for row in held_out]
        scores.append(evaluate(actual, predicted))
    return scores
def naive_bayes(train, test):
    """Naive Bayes classifier: fit per-class stats on *train*, predict *test* rows."""
    stats = class_stats(train)
    return [predict(stats, row) for row in test]
def run(file_name, target):
    """End-to-end driver: load a CSV, move column *target* to the class
    position, coerce feature columns to float, encode the class column as
    ints, and report 10-fold cross-validation accuracy for naive Bayes."""
    seed(1)  # fixed seed so fold assignment is reproducible
    dataset = read_csv(file_name)
    dataset = move_class_to_last_col(dataset, target)
    last_col = len(dataset[0]) - 1
    for col in range(last_col):
        str_column_to_float(dataset, col)
    int_from_string_col(dataset, last_col)
    scores = cross_validation(dataset, naive_bayes, 10)
    print("10-fold Cross-Validation Accuracy Scores")
    for score in scores:
        print("%.4f%%" % score)
    print('Mean Accuracy: %.4f%%' % (sum(scores)/float(len(scores))))
|
flexible
|
{
"blob_id": "f92a1398a27541557ec5bbf752d44ce40d1df94a",
"index": 4131,
"step-1": "<mask token>\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, 
target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-2": "<mask token>\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n 
test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-3": "<mask token>\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\ndef str_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\n<mask token>\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = 
list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-4": "from math import sqrt, pi, exp\nfrom csv import reader\nfrom random import seed, randrange\n<mask token>\n\n\ndef probability(x, avg, standev):\n exponent = exp(-((x - avg) ** 2 / (2 * standev ** 2)))\n return 1 / (sqrt(2 * pi) * standev) * exponent\n\n\ndef avg(vals):\n return sum(vals) / float(len(vals))\n\n\ndef standev(vals):\n mean = avg(vals)\n var = sum([((x - mean) ** 2) for x in vals]) / float(len(vals) - 1)\n return sqrt(var)\n\n\n<mask token>\n\n\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n\ndef str_column_to_float(dataset, column):\n for row in dataset:\n row[column] = float(row[column].strip())\n\n\ndef int_from_string_col(data, col):\n class_val = [row[col] for row in data]\n unique_set = set(class_val)\n lookup = dict()\n for i, val in enumerate(unique_set):\n lookup[val] = i\n for row in data:\n row[col] = lookup[row[col]]\n return lookup\n\n\ndef move_class_to_last_col(data, col):\n for row in data:\n temp = row[col]\n del row[col]\n row.append(temp)\n return data\n\n\n<mask token>\n\n\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if class_val not in data_by_class:\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n\n<mask token>\n\n\ndef data_stats(data):\n stats = [(avg(col), standev(col), len(col)) for col in zip(*data)]\n del stats[-1]\n return stats\n\n\ndef class_stats(data):\n split = split_class(data)\n class_stats = dict()\n for class_val, row in split.items():\n class_stats[class_val] = data_stats(row)\n return class_stats\n\n\n<mask token>\n\n\ndef class_get_prob(stats, instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2] / 
float(num_rows)\n for i in range(len(class_stats)):\n avg, standev, size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i], avg, standev)\n return prob_vals\n\n\ndef predict(stats, instance):\n prob_vals = class_get_prob(stats, instance)\n top_prob, top_label = -1, None\n for class_val, prob in prob_vals.items():\n if top_label is None or prob > top_prob:\n top_prob = prob\n top_label = class_val\n return top_label\n\n\ndef cross_validation_split(data, n_folds):\n data_split = list()\n copy = list(data)\n fold_size = int(len(data) / n_folds)\n for _ in range(n_folds):\n fold = list()\n while len(fold) < fold_size:\n index = randrange(len(copy))\n fold.append(copy.pop(index))\n data_split.append(fold)\n return data_split\n\n\ndef evaluate(actual, predicted):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n return correct / float(len(actual)) * 100.0\n\n\ndef cross_validation(data, algo, n_folds, *args):\n folds = cross_validation_split(data, n_folds)\n accuracy_list = list()\n for fold in folds:\n train_set = list(folds)\n train_set.remove(fold)\n train_set = sum(train_set, [])\n test_set = list()\n for row in fold:\n copy = list(row)\n test_set.append(copy)\n copy[-1] = None\n predicted = algo(train_set, test_set, *args)\n actual = [row[-1] for row in fold]\n accuracy = evaluate(actual, predicted)\n accuracy_list.append(accuracy)\n return accuracy_list\n\n\ndef naive_bayes(train, test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats, row)\n preds.append(result)\n return preds\n\n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data, target)\n for i in range(len(data[0]) - 1):\n str_column_to_float(data, i)\n int_from_string_col(data, len(data[0]) - 1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print('10-fold Cross-Validation Accuracy Scores')\n for score in accuracies:\n 
print('%.4f%%' % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies) / float(len(accuracies))))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n#imports\nfrom math import sqrt, pi, exp\nfrom csv import reader\nfrom random import seed,randrange\n\n\n\"\"\"\nHelper functions\n\"\"\"\n#calculate probability\ndef probability(x,avg,standev):\n exponent = exp(-((x-avg)**2 / (2 * standev**2)))\n return (1/(sqrt(2*pi) *standev)) * exponent\n\n#mean\ndef avg(vals):\n return sum(vals)/float(len(vals))\n\n#standard deviation\ndef standev(vals):\n mean = avg(vals)\n var = sum([(x-mean)**2 for x in vals]) / float(len(vals)-1)\n return sqrt(var)\n\n\"\"\"\nData Handling\n\"\"\"\ndef read_csv(file_name):\n data = list()\n with open(file_name, 'r') as file:\n csv = reader(file)\n for row in csv:\n if not row:\n continue\n data.append(row)\n return data\n\n# Convert string column to float\ndef str_column_to_float(dataset, column):\n\tfor row in dataset:\n\t\trow[column] = float(row[column].strip())\n\ndef int_from_string_col(data,col):\n class_val =[row[col] for row in data]\n unique_set = set(class_val)\n lookup = dict()\n for i, val in enumerate(unique_set):\n lookup[val] = i\n for row in data:\n row[col] = lookup[row[col]]\n return lookup\n\ndef move_class_to_last_col(data,col):\n for row in data:\n temp = row[col]\n del row[col]\n row.append(temp)\n return data\n\n\n\"\"\"\nImplementation Functions\n\"\"\"\n\n\"\"\"\nWe need to calculate the probability of data according to their class so the \ntraining data needs to be split up by classes. In order to do this we need to \nestablish the column that represents the class value for each dataset. 
\n\"\"\"\n# this works for datasets with last column representing class value\ndef split_class(data):\n data_by_class = dict()\n for i in range(len(data)):\n instance = data[i]\n class_val = instance[-1]\n if(class_val not in data_by_class):\n data_by_class[class_val] = list()\n data_by_class[class_val].append(instance)\n return data_by_class\n\n \n\n\n\"\"\"\nWe need to find the mean and standard deviation for each column of input.\n\"\"\"\n\ndef data_stats(data):\n stats = [(avg(col),standev(col),len(col)) for col in zip(*data)] \n del(stats[-1])\n return stats\n \ndef class_stats(data):\n split = split_class(data)\n class_stats = dict()\n for class_val, row in split.items():\n class_stats[class_val] = data_stats(row)\n return class_stats\n\n\"\"\"\nCalculate Class Probabilities\n\"\"\"\ndef class_get_prob(stats,instance):\n num_rows = sum([stats[label][0][2] for label in stats])\n prob_vals = dict()\n for class_val, class_stats in stats.items():\n prob_vals[class_val] = stats[class_val][0][2]/float(num_rows)\n for i in range(len(class_stats)):\n avg,standev,size = class_stats[i]\n prob_vals[class_val] *= probability(instance[i],avg,standev)\n return prob_vals\n\ndef predict(stats,instance):\n prob_vals = class_get_prob(stats,instance)\n top_prob, top_label = -1, None\n for class_val, prob in prob_vals.items():\n if top_label is None or prob > top_prob:\n top_prob = prob\n top_label = class_val\n return top_label\n\ndef cross_validation_split(data, n_folds):\n\tdata_split = list()\n\tcopy = list(data)\n\tfold_size = int(len(data) / n_folds)\n\tfor _ in range(n_folds):\n\t\tfold = list()\n\t\twhile len(fold) < fold_size:\n\t\t\tindex = randrange(len(copy))\n\t\t\tfold.append(copy.pop(index))\n\t\tdata_split.append(fold)\n\treturn data_split\n \ndef evaluate(actual, predicted):\n\tcorrect = 0\n\tfor i in range(len(actual)):\n\t\tif actual[i] == predicted[i]:\n\t\t\tcorrect += 1\n\treturn correct / float(len(actual)) * 100.0\n \ndef cross_validation(data, algo, 
n_folds, *args):\n\tfolds = cross_validation_split(data, n_folds)\n\taccuracy_list = list()\n\tfor fold in folds:\n\t\ttrain_set = list(folds)\n\t\ttrain_set.remove(fold)\n\t\ttrain_set = sum(train_set, [])\n\t\ttest_set = list()\n\t\tfor row in fold:\n\t\t\tcopy = list(row)\n\t\t\ttest_set.append(copy)\n\t\t\tcopy[-1] = None\n\t\tpredicted = algo(train_set, test_set, *args)\n\t\tactual = [row[-1] for row in fold]\n\t\taccuracy = evaluate(actual, predicted)\n\t\taccuracy_list.append(accuracy)\n\treturn accuracy_list\n\ndef naive_bayes(train,test):\n stats = class_stats(train)\n preds = list()\n for row in test:\n result = predict(stats,row)\n preds.append(result)\n return(preds)\n \n\ndef run(file_name, target):\n seed(1)\n data = read_csv(file_name)\n data = move_class_to_last_col(data,target)\n for i in range(len(data[0])-1):\n str_column_to_float(data,i)\n int_from_string_col(data,len(data[0])-1)\n n_folds = 10\n accuracies = cross_validation(data, naive_bayes, n_folds)\n print(\"10-fold Cross-Validation Accuracy Scores\")\n for score in accuracies:\n print(\"%.4f%%\" % score)\n print('Mean Accuracy: %.4f%%' % (sum(accuracies)/float(len(accuracies))))\n \n\n ",
"step-ids": [
9,
11,
12,
18,
19
]
}
|
[
9,
11,
12,
18,
19
] |
# -*- coding: utf-8 -*-
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.middleware.csrf import get_token
from django.template.context import Context
from django.utils.translation import ugettext_lazy as _
from account.models import Employee
from amortization import settings
from models import MenuItem
from task.forms import RequestForm
from task.models import Request, Task
__author__ = 'cm'
from django.template.loader import get_template
def base_context(request):
    """Build the template context shared by every view.

    WARNING: this function returns ``locals()``, so every local variable
    name below is a template context key -- renaming any local silently
    changes the context contract. Exposed keys: author, version,
    csrf_token, media_url, app_name, path, logout, employee, usr, menu
    (plus ``request`` itself).
    """
    author = settings.ADMINS[0][0]
    version = settings.VERSION
    csrf_token = get_token(request)
    media_url = settings.MEDIA_URL
    app_name = _('Amortization & Expertise')
    path = request.path
    # ``logout`` here is a bool flag for the template ("show logout link"),
    # deliberately distinct from the django.contrib.auth.logout import.
    logout = False
    employee = None
    usr = request.user
    # Anonymous visitors only see non-staff menu items.
    menu = MenuItem.objects.filter(for_staff=False).order_by('order')
    if usr.is_authenticated():
        logout = True
        employee = Employee.objects.filter(user=usr)
        if employee:
            employee = employee[0]
            if employee.user.is_staff:
                # Staff users see the full menu, staff items included.
                menu = MenuItem.objects.order_by('order')
    return locals()
def main(request):
    """Landing view: show the amortization request form; on POST, create
    (or look up) the Employee by tab number, log them in, and save a
    Request row.

    NOTE(review): passwords are set to the employee's tab number and the
    Django username to the free-text ``fio`` field -- duplicate names
    would make ``create_user`` collide; confirm this is acceptable.
    """
    c = base_context(request)
    template = get_template("index.html")
    c['title'] = _('Request')
    form = RequestForm()
    # Pre-fill the form for an already-authenticated employee.
    user = request.user
    c['user'] = user
    if user.is_authenticated():
        e = Employee.objects.filter(user=user)
        if e:
            empl = e[0]
            form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl.tab_number, 'post': empl.post, 'cabinet': empl.cabinet})
        c['logout'] = True
    c['form'] = form
    if request.method == 'POST':
        postdata = request.POST.copy()
        form = RequestForm(request.POST)
        if form.is_valid():
            # Look up the employee by tab number from the submitted form.
            empl = Employee.objects.filter(tab_number = postdata.get('tab_number', 0))
            if not empl:
                # Unknown tab number: register a new Django user ---
                if user.is_authenticated():
                    # Drop the current session before switching accounts.
                    logout(request)
                username = postdata.get('fio', 'error!')
                password = postdata.get('tab_number', 0)
                User.objects.create_user(username, 'empty@surgpu.ru', password)
                # Log the freshly created account in.
                new_user = authenticate(username=username, password=password)
                if new_user:
                    login(request, new_user)
                # Create the matching amortization Employee record.
                empl = Employee()
                empl.tab_number = postdata.get('tab_number', 0)
                empl.fio = postdata.get('fio', "error!")
                empl.user = new_user
                empl.post = postdata.get('post', '')
                empl.cabinet = postdata.get('cabinet', '0-000')
                empl.save()
                uid = empl
            else:
                # Known employee: re-authenticate with tab number as password.
                uid = empl[0]
                user = authenticate(username=uid.user.username, password=uid.tab_number)
                if user:
                    login(request, user)
            # Persist the device request itself.
            req = Request()
            req.user = uid
            req.number = postdata.get('number', '000000000000')
            req.device = postdata.get('device', 'NoName')
            req.serial = postdata.get('serial', '')
            req.year = postdata.get('year', '----')
            req.save()
            c['saved'] = True
        else:
            # Invalid form: re-render with validation errors attached.
            c['form'] = form
    return HttpResponse(template.render(Context(c)))
|
normal
|
{
"blob_id": "11163dc99ee65ab44494c08d81e110e9c42390ae",
"index": 3130,
"step-1": "<mask token>\n\n\ndef main(request):\n c = base_context(request)\n template = get_template('index.html')\n c['title'] = _('Request')\n form = RequestForm()\n user = request.user\n c['user'] = user\n if user.is_authenticated():\n e = Employee.objects.filter(user=user)\n if e:\n empl = e[0]\n form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl\n .tab_number, 'post': empl.post, 'cabinet': empl.cabinet})\n c['logout'] = True\n c['form'] = form\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = RequestForm(request.POST)\n if form.is_valid():\n empl = Employee.objects.filter(tab_number=postdata.get(\n 'tab_number', 0))\n if not empl:\n if user.is_authenticated():\n logout(request)\n username = postdata.get('fio', 'error!')\n password = postdata.get('tab_number', 0)\n User.objects.create_user(username, 'empty@surgpu.ru', password)\n new_user = authenticate(username=username, password=password)\n if new_user:\n login(request, new_user)\n empl = Employee()\n empl.tab_number = postdata.get('tab_number', 0)\n empl.fio = postdata.get('fio', 'error!')\n empl.user = new_user\n empl.post = postdata.get('post', '')\n empl.cabinet = postdata.get('cabinet', '0-000')\n empl.save()\n uid = empl\n else:\n uid = empl[0]\n user = authenticate(username=uid.user.username, password=\n uid.tab_number)\n if user:\n login(request, user)\n req = Request()\n req.user = uid\n req.number = postdata.get('number', '000000000000')\n req.device = postdata.get('device', 'NoName')\n req.serial = postdata.get('serial', '')\n req.year = postdata.get('year', '----')\n req.save()\n c['saved'] = True\n else:\n c['form'] = form\n return HttpResponse(template.render(Context(c)))\n",
"step-2": "<mask token>\n\n\ndef base_context(request):\n author = settings.ADMINS[0][0]\n version = settings.VERSION\n csrf_token = get_token(request)\n media_url = settings.MEDIA_URL\n app_name = _('Amortization & Expertise')\n path = request.path\n logout = False\n employee = None\n usr = request.user\n menu = MenuItem.objects.filter(for_staff=False).order_by('order')\n if usr.is_authenticated():\n logout = True\n employee = Employee.objects.filter(user=usr)\n if employee:\n employee = employee[0]\n if employee.user.is_staff:\n menu = MenuItem.objects.order_by('order')\n return locals()\n\n\ndef main(request):\n c = base_context(request)\n template = get_template('index.html')\n c['title'] = _('Request')\n form = RequestForm()\n user = request.user\n c['user'] = user\n if user.is_authenticated():\n e = Employee.objects.filter(user=user)\n if e:\n empl = e[0]\n form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl\n .tab_number, 'post': empl.post, 'cabinet': empl.cabinet})\n c['logout'] = True\n c['form'] = form\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = RequestForm(request.POST)\n if form.is_valid():\n empl = Employee.objects.filter(tab_number=postdata.get(\n 'tab_number', 0))\n if not empl:\n if user.is_authenticated():\n logout(request)\n username = postdata.get('fio', 'error!')\n password = postdata.get('tab_number', 0)\n User.objects.create_user(username, 'empty@surgpu.ru', password)\n new_user = authenticate(username=username, password=password)\n if new_user:\n login(request, new_user)\n empl = Employee()\n empl.tab_number = postdata.get('tab_number', 0)\n empl.fio = postdata.get('fio', 'error!')\n empl.user = new_user\n empl.post = postdata.get('post', '')\n empl.cabinet = postdata.get('cabinet', '0-000')\n empl.save()\n uid = empl\n else:\n uid = empl[0]\n user = authenticate(username=uid.user.username, password=\n uid.tab_number)\n if user:\n login(request, user)\n req = Request()\n req.user = uid\n req.number = 
postdata.get('number', '000000000000')\n req.device = postdata.get('device', 'NoName')\n req.serial = postdata.get('serial', '')\n req.year = postdata.get('year', '----')\n req.save()\n c['saved'] = True\n else:\n c['form'] = form\n return HttpResponse(template.render(Context(c)))\n",
"step-3": "<mask token>\n__author__ = 'cm'\n<mask token>\n\n\ndef base_context(request):\n author = settings.ADMINS[0][0]\n version = settings.VERSION\n csrf_token = get_token(request)\n media_url = settings.MEDIA_URL\n app_name = _('Amortization & Expertise')\n path = request.path\n logout = False\n employee = None\n usr = request.user\n menu = MenuItem.objects.filter(for_staff=False).order_by('order')\n if usr.is_authenticated():\n logout = True\n employee = Employee.objects.filter(user=usr)\n if employee:\n employee = employee[0]\n if employee.user.is_staff:\n menu = MenuItem.objects.order_by('order')\n return locals()\n\n\ndef main(request):\n c = base_context(request)\n template = get_template('index.html')\n c['title'] = _('Request')\n form = RequestForm()\n user = request.user\n c['user'] = user\n if user.is_authenticated():\n e = Employee.objects.filter(user=user)\n if e:\n empl = e[0]\n form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl\n .tab_number, 'post': empl.post, 'cabinet': empl.cabinet})\n c['logout'] = True\n c['form'] = form\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = RequestForm(request.POST)\n if form.is_valid():\n empl = Employee.objects.filter(tab_number=postdata.get(\n 'tab_number', 0))\n if not empl:\n if user.is_authenticated():\n logout(request)\n username = postdata.get('fio', 'error!')\n password = postdata.get('tab_number', 0)\n User.objects.create_user(username, 'empty@surgpu.ru', password)\n new_user = authenticate(username=username, password=password)\n if new_user:\n login(request, new_user)\n empl = Employee()\n empl.tab_number = postdata.get('tab_number', 0)\n empl.fio = postdata.get('fio', 'error!')\n empl.user = new_user\n empl.post = postdata.get('post', '')\n empl.cabinet = postdata.get('cabinet', '0-000')\n empl.save()\n uid = empl\n else:\n uid = empl[0]\n user = authenticate(username=uid.user.username, password=\n uid.tab_number)\n if user:\n login(request, user)\n req = 
Request()\n req.user = uid\n req.number = postdata.get('number', '000000000000')\n req.device = postdata.get('device', 'NoName')\n req.serial = postdata.get('serial', '')\n req.year = postdata.get('year', '----')\n req.save()\n c['saved'] = True\n else:\n c['form'] = form\n return HttpResponse(template.render(Context(c)))\n",
"step-4": "from django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.middleware.csrf import get_token\nfrom django.template.context import Context\nfrom django.utils.translation import ugettext_lazy as _\nfrom account.models import Employee\nfrom amortization import settings\nfrom models import MenuItem\nfrom task.forms import RequestForm\nfrom task.models import Request, Task\n__author__ = 'cm'\nfrom django.template.loader import get_template\n\n\ndef base_context(request):\n author = settings.ADMINS[0][0]\n version = settings.VERSION\n csrf_token = get_token(request)\n media_url = settings.MEDIA_URL\n app_name = _('Amortization & Expertise')\n path = request.path\n logout = False\n employee = None\n usr = request.user\n menu = MenuItem.objects.filter(for_staff=False).order_by('order')\n if usr.is_authenticated():\n logout = True\n employee = Employee.objects.filter(user=usr)\n if employee:\n employee = employee[0]\n if employee.user.is_staff:\n menu = MenuItem.objects.order_by('order')\n return locals()\n\n\ndef main(request):\n c = base_context(request)\n template = get_template('index.html')\n c['title'] = _('Request')\n form = RequestForm()\n user = request.user\n c['user'] = user\n if user.is_authenticated():\n e = Employee.objects.filter(user=user)\n if e:\n empl = e[0]\n form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl\n .tab_number, 'post': empl.post, 'cabinet': empl.cabinet})\n c['logout'] = True\n c['form'] = form\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = RequestForm(request.POST)\n if form.is_valid():\n empl = Employee.objects.filter(tab_number=postdata.get(\n 'tab_number', 0))\n if not empl:\n if user.is_authenticated():\n logout(request)\n username = postdata.get('fio', 'error!')\n password = postdata.get('tab_number', 0)\n User.objects.create_user(username, 'empty@surgpu.ru', 
password)\n new_user = authenticate(username=username, password=password)\n if new_user:\n login(request, new_user)\n empl = Employee()\n empl.tab_number = postdata.get('tab_number', 0)\n empl.fio = postdata.get('fio', 'error!')\n empl.user = new_user\n empl.post = postdata.get('post', '')\n empl.cabinet = postdata.get('cabinet', '0-000')\n empl.save()\n uid = empl\n else:\n uid = empl[0]\n user = authenticate(username=uid.user.username, password=\n uid.tab_number)\n if user:\n login(request, user)\n req = Request()\n req.user = uid\n req.number = postdata.get('number', '000000000000')\n req.device = postdata.get('device', 'NoName')\n req.serial = postdata.get('serial', '')\n req.year = postdata.get('year', '----')\n req.save()\n c['saved'] = True\n else:\n c['form'] = form\n return HttpResponse(template.render(Context(c)))\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.contrib.auth import logout, login, authenticate\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.middleware.csrf import get_token\nfrom django.template.context import Context\nfrom django.utils.translation import ugettext_lazy as _\nfrom account.models import Employee\nfrom amortization import settings\nfrom models import MenuItem\nfrom task.forms import RequestForm\nfrom task.models import Request, Task\n\n__author__ = 'cm'\n\nfrom django.template.loader import get_template\n\ndef base_context(request):\n author = settings.ADMINS[0][0]\n version = settings.VERSION\n csrf_token = get_token(request)\n media_url = settings.MEDIA_URL\n app_name = _('Amortization & Expertise')\n path = request.path\n\n logout = False\n employee = None\n usr = request.user\n menu = MenuItem.objects.filter(for_staff=False).order_by('order')\n if usr.is_authenticated():\n logout = True\n employee = Employee.objects.filter(user=usr)\n if employee:\n employee = employee[0]\n if employee.user.is_staff:\n menu = MenuItem.objects.order_by('order')\n\n return locals()\n\ndef main(request):\n c = base_context(request)\n template = get_template(\"index.html\")\n c['title'] = _('Request')\n form = RequestForm()\n\n # if user is authenticated\n user = request.user\n c['user'] = user\n if user.is_authenticated():\n e = Employee.objects.filter(user=user)\n if e:\n empl = e[0]\n form = RequestForm(initial={'fio': empl.fio, 'tab_number': empl.tab_number, 'post': empl.post, 'cabinet': empl.cabinet})\n c['logout'] = True\n\n c['form'] = form\n\n if request.method == 'POST':\n postdata = request.POST.copy()\n form = RequestForm(request.POST)\n if form.is_valid():\n empl = Employee.objects.filter(tab_number = postdata.get('tab_number', 0))\n if not empl:\n # django user ---\n if user.is_authenticated():\n # logout\n logout(request)\n username = postdata.get('fio', 'error!')\n 
password = postdata.get('tab_number', 0)\n User.objects.create_user(username, 'empty@surgpu.ru', password)\n\n # login\n new_user = authenticate(username=username, password=password)\n if new_user:\n login(request, new_user)\n\n # amortization user\n empl = Employee()\n empl.tab_number = postdata.get('tab_number', 0)\n empl.fio = postdata.get('fio', \"error!\")\n empl.user = new_user\n empl.post = postdata.get('post', '')\n empl.cabinet = postdata.get('cabinet', '0-000')\n empl.save()\n uid = empl\n else:\n uid = empl[0]\n user = authenticate(username=uid.user.username, password=uid.tab_number)\n if user:\n login(request, user)\n\n req = Request()\n req.user = uid\n req.number = postdata.get('number', '000000000000')\n req.device = postdata.get('device', 'NoName')\n req.serial = postdata.get('serial', '')\n req.year = postdata.get('year', '----')\n req.save()\n c['saved'] = True\n\n else:\n c['form'] = form\n\n return HttpResponse(template.render(Context(c)))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MacAgeTime(A10BaseClass):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MacAgeTime(A10BaseClass):
<|reserved_special_token_0|>
def __init__(self, **kwargs):
self.ERROR_MSG = ''
self.required = []
self.b_key = 'mac-age-time'
self.a10_url = '/axapi/v3/mac-age-time'
self.DeviceProxy = ''
self.aging_time = ''
for keys, value in kwargs.items():
setattr(self, keys, value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MacAgeTime(A10BaseClass):
"""Class Description::
Set Aging period for all MAC Interfaces.
Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param aging_time: {"description": "Set aging period in seconds for all MAC interfaces (default 300 seconds)", "format": "number", "default": 300, "optional": true, "maximum": 600, "minimum": 10, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/mac-age-time`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ''
self.required = []
self.b_key = 'mac-age-time'
self.a10_url = '/axapi/v3/mac-age-time'
self.DeviceProxy = ''
self.aging_time = ''
for keys, value in kwargs.items():
setattr(self, keys, value)
<|reserved_special_token_1|>
from a10sdk.common.A10BaseClass import A10BaseClass
class MacAgeTime(A10BaseClass):
"""Class Description::
Set Aging period for all MAC Interfaces.
Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param aging_time: {"description": "Set aging period in seconds for all MAC interfaces (default 300 seconds)", "format": "number", "default": 300, "optional": true, "maximum": 600, "minimum": 10, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/mac-age-time`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ''
self.required = []
self.b_key = 'mac-age-time'
self.a10_url = '/axapi/v3/mac-age-time'
self.DeviceProxy = ''
self.aging_time = ''
for keys, value in kwargs.items():
setattr(self, keys, value)
<|reserved_special_token_1|>
from a10sdk.common.A10BaseClass import A10BaseClass
class MacAgeTime(A10BaseClass):
"""Class Description::
Set Aging period for all MAC Interfaces.
Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param aging_time: {"description": "Set aging period in seconds for all MAC interfaces (default 300 seconds)", "format": "number", "default": 300, "optional": true, "maximum": 600, "minimum": 10, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/mac-age-time`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "mac-age-time"
self.a10_url="/axapi/v3/mac-age-time"
self.DeviceProxy = ""
self.aging_time = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
flexible
|
{
"blob_id": "f08677430e54822abbce61d0cac5a6fea14d3872",
"index": 6078,
"step-1": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n <mask token>\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-3": "<mask token>\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-4": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n\n def __init__(self, **kwargs):\n self.ERROR_MSG = ''\n self.required = []\n self.b_key = 'mac-age-time'\n self.a10_url = '/axapi/v3/mac-age-time'\n self.DeviceProxy = ''\n self.aging_time = ''\n for keys, value in kwargs.items():\n setattr(self, keys, value)\n",
"step-5": "from a10sdk.common.A10BaseClass import A10BaseClass\n\n\nclass MacAgeTime(A10BaseClass):\n \n \"\"\"Class Description::\n Set Aging period for all MAC Interfaces.\n\n Class mac-age-time supports CRUD Operations and inherits from `common/A10BaseClass`.\n This class is the `\"PARENT\"` class for this module.`\n\n :param aging_time: {\"description\": \"Set aging period in seconds for all MAC interfaces (default 300 seconds)\", \"format\": \"number\", \"default\": 300, \"optional\": true, \"maximum\": 600, \"minimum\": 10, \"type\": \"number\"}\n :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`\n\n \n\n URL for this object::\n `https://<Hostname|Ip address>//axapi/v3/mac-age-time`.\n\n \n\n \n \"\"\"\n def __init__(self, **kwargs):\n self.ERROR_MSG = \"\"\n self.required=[]\n self.b_key = \"mac-age-time\"\n self.a10_url=\"/axapi/v3/mac-age-time\"\n self.DeviceProxy = \"\"\n self.aging_time = \"\"\n\n for keys, value in kwargs.items():\n setattr(self,keys, value)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def normalise(image):
dbl_image = image.astype(float)
mean = np.mean(dbl_image)
iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),
cv2.cv.IPL_DEPTH_8U, 1)
cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *
image.shape[1])
image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.
IPL_DEPTH_32F, 1)
cv2.cv.CvtScale(iplImage, image_32F)
cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)
norm_im = np.asarray(image_32F[:, :])
return norm_im
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import cv2
import numpy as np
def normalise(image):
dbl_image = image.astype(float)
mean = np.mean(dbl_image)
iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),
cv2.cv.IPL_DEPTH_8U, 1)
cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *
image.shape[1])
image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.
IPL_DEPTH_32F, 1)
cv2.cv.CvtScale(iplImage, image_32F)
cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)
norm_im = np.asarray(image_32F[:, :])
return norm_im
<|reserved_special_token_1|>
'''
IplNorm.py
Description:
Normalizing 0 - 255 initial fingerprint to a normalized image.
Using energy normalization.
Input:
-image
Output:
-norm_im
@author: Edoardo Foco
'''
import cv2
import numpy as np
def normalise(image):
dbl_image = image.astype(float)
# calculate the mean of the image.
mean = np.mean(dbl_image)
# converting numpy 8-bit image to 8- bit cv2.iplimage
iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]), cv2.cv.IPL_DEPTH_8U, 1)
cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 * image.shape[1])
# initializing 32-bit floating point iplimage
image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.IPL_DEPTH_32F,1)
# converting 8-bit unsigned integer image to 32-bit floating point image
cv2.cv.CvtScale(iplImage,image_32F)
# energy Normalization. Formula: image = image/mean(image)
cv2.cv.ConvertScale(image_32F, image_32F, (1/mean), 0);
# re-converting to numpy image
norm_im = np.asarray(image_32F[:,:])
return norm_im
|
flexible
|
{
"blob_id": "f51d85ff352d9c84a8ded29ad94b24ca6dda46ad",
"index": 7593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalise(image):\n dbl_image = image.astype(float)\n mean = np.mean(dbl_image)\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),\n cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *\n image.shape[1])\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.\n IPL_DEPTH_32F, 1)\n cv2.cv.CvtScale(iplImage, image_32F)\n cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)\n norm_im = np.asarray(image_32F[:, :])\n return norm_im\n",
"step-3": "<mask token>\nimport cv2\nimport numpy as np\n\n\ndef normalise(image):\n dbl_image = image.astype(float)\n mean = np.mean(dbl_image)\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),\n cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 *\n image.shape[1])\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.\n IPL_DEPTH_32F, 1)\n cv2.cv.CvtScale(iplImage, image_32F)\n cv2.cv.ConvertScale(image_32F, image_32F, 1 / mean, 0)\n norm_im = np.asarray(image_32F[:, :])\n return norm_im\n",
"step-4": "\n'''\nIplNorm.py\nDescription: \n Normalizing 0 - 255 initial fingerprint to a normalized image.\n Using energy normalization.\n \n Input:\n -image\n \n Output:\n -norm_im\n@author: Edoardo Foco\n'''\n\nimport cv2\nimport numpy as np\n\ndef normalise(image):\n \n dbl_image = image.astype(float)\n # calculate the mean of the image.\n mean = np.mean(dbl_image)\n \n # converting numpy 8-bit image to 8- bit cv2.iplimage\n iplImage = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]), cv2.cv.IPL_DEPTH_8U, 1)\n cv2.cv.SetData(iplImage, image.tostring(), image.dtype.itemsize * 1 * image.shape[1])\n \n # initializing 32-bit floating point iplimage\n image_32F = cv2.cv.CreateImage(cv2.cv.GetSize(iplImage), cv2.cv.IPL_DEPTH_32F,1)\n \n # converting 8-bit unsigned integer image to 32-bit floating point image\n cv2.cv.CvtScale(iplImage,image_32F)\n \n # energy Normalization. Formula: image = image/mean(image)\n cv2.cv.ConvertScale(image_32F, image_32F, (1/mean), 0);\n \n # re-converting to numpy image\n norm_im = np.asarray(image_32F[:,:])\n \n return norm_im",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class DuckList(generics.ListCreateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DuckList(generics.ListCreateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
<|reserved_special_token_1|>
from django.shortcuts import get_object_or_404
from rest_framework import generics
from .models import Duck
from .serializers import Duck_Serializer
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
<|reserved_special_token_1|>
from django.shortcuts import get_object_or_404
from rest_framework import generics
from .models import Duck
from .serializers import Duck_Serializer
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(
queryset,
pk = self.kwargs['pk'],
)
return obj
|
flexible
|
{
"blob_id": "8334478c8b7fc7688477cdb837467e00e857c07c",
"index": 1196,
"step-1": "<mask token>\n\n\nclass DuckList(generics.ListCreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DuckList(generics.ListCreateAPIView):\n <mask token>\n <mask token>\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs['pk'])\n return obj\n",
"step-3": "<mask token>\n\n\nclass DuckList(generics.ListCreateAPIView):\n queryset = Duck.objects.all()\n serializer_class = Duck_Serializer\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs['pk'])\n return obj\n",
"step-4": "from django.shortcuts import get_object_or_404\nfrom rest_framework import generics\nfrom .models import Duck\nfrom .serializers import Duck_Serializer\n\n\nclass DuckList(generics.ListCreateAPIView):\n queryset = Duck.objects.all()\n serializer_class = Duck_Serializer\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs['pk'])\n return obj\n",
"step-5": "from django.shortcuts import get_object_or_404\nfrom rest_framework import generics\nfrom .models import Duck\nfrom .serializers import Duck_Serializer\n\nclass DuckList(generics.ListCreateAPIView):\n\tqueryset = Duck.objects.all()\n\tserializer_class = Duck_Serializer\n\n\tdef get_object(self):\n\t\tqueryset = self.get_queryset()\n\t\tobj = get_object_or_404(\n\t\t\tqueryset,\n\t\t\tpk = self.kwargs['pk'],\n\t\t)\n\t\treturn obj\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -* coding: utf-8 -*-
# A headless media player based on gstreamer.
from gi.repository import Gst
Gst.init(None)
class Player:
def __init__(self, uri=None):
# Creates a playbin (plays media from an uri).
self.player = Gst.ElementFactory.make('playbin', 'player')
self.uri = uri
@property
def uri(self):
return self._uri
@uri.setter
def uri(self, value):
self._uri = value
self.player.set_state(Gst.State.NULL)
if value:
self.player.set_property('uri', value)
def play(self):
"""Start playing"""
self.player.set_state(Gst.State.PLAYING)
def pause(self):
"""Pause playing"""
self.player.set_state(Gst.State.PAUSED)
def stop(self):
self.player.set_state(Gst.State.NULL)
|
normal
|
{
"blob_id": "01e9ceb516a323a2017c65e368da419c6570dce2",
"index": 7304,
"step-1": "<mask token>\n\n\nclass Player:\n <mask token>\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, value):\n self._uri = value\n self.player.set_state(Gst.State.NULL)\n if value:\n self.player.set_property('uri', value)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Player:\n\n def __init__(self, uri=None):\n self.player = Gst.ElementFactory.make('playbin', 'player')\n self.uri = uri\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, value):\n self._uri = value\n self.player.set_state(Gst.State.NULL)\n if value:\n self.player.set_property('uri', value)\n\n def play(self):\n \"\"\"Start playing\"\"\"\n self.player.set_state(Gst.State.PLAYING)\n <mask token>\n\n def stop(self):\n self.player.set_state(Gst.State.NULL)\n",
"step-3": "<mask token>\nGst.init(None)\n\n\nclass Player:\n\n def __init__(self, uri=None):\n self.player = Gst.ElementFactory.make('playbin', 'player')\n self.uri = uri\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, value):\n self._uri = value\n self.player.set_state(Gst.State.NULL)\n if value:\n self.player.set_property('uri', value)\n\n def play(self):\n \"\"\"Start playing\"\"\"\n self.player.set_state(Gst.State.PLAYING)\n\n def pause(self):\n \"\"\"Pause playing\"\"\"\n self.player.set_state(Gst.State.PAUSED)\n\n def stop(self):\n self.player.set_state(Gst.State.NULL)\n",
"step-4": "from gi.repository import Gst\nGst.init(None)\n\n\nclass Player:\n\n def __init__(self, uri=None):\n self.player = Gst.ElementFactory.make('playbin', 'player')\n self.uri = uri\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, value):\n self._uri = value\n self.player.set_state(Gst.State.NULL)\n if value:\n self.player.set_property('uri', value)\n\n def play(self):\n \"\"\"Start playing\"\"\"\n self.player.set_state(Gst.State.PLAYING)\n\n def pause(self):\n \"\"\"Pause playing\"\"\"\n self.player.set_state(Gst.State.PAUSED)\n\n def stop(self):\n self.player.set_state(Gst.State.NULL)\n",
"step-5": "# -* coding: utf-8 -*-\n# A headless media player based on gstreamer.\n\nfrom gi.repository import Gst\nGst.init(None)\n\n\nclass Player:\n def __init__(self, uri=None):\n # Creates a playbin (plays media from an uri).\n self.player = Gst.ElementFactory.make('playbin', 'player')\n\n self.uri = uri\n\n @property\n def uri(self):\n return self._uri\n\n @uri.setter\n def uri(self, value):\n self._uri = value\n self.player.set_state(Gst.State.NULL)\n if value:\n self.player.set_property('uri', value)\n\n def play(self):\n \"\"\"Start playing\"\"\"\n self.player.set_state(Gst.State.PLAYING)\n\n def pause(self):\n \"\"\"Pause playing\"\"\"\n self.player.set_state(Gst.State.PAUSED)\n\n def stop(self):\n self.player.set_state(Gst.State.NULL)\n",
"step-ids": [
3,
6,
8,
9,
10
]
}
|
[
3,
6,
8,
9,
10
] |
import sys
import os.path
root_dir = os.path.dirname(os.path.dirname(__file__))
jsondb_dir = os.path.join(root_dir, 'jsondb')
sys.path.append(jsondb_dir)
|
normal
|
{
"blob_id": "eeb588a162fa222c0f70eb832a0026d0d8adbe9b",
"index": 6769,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(jsondb_dir)\n",
"step-3": "<mask token>\nroot_dir = os.path.dirname(os.path.dirname(__file__))\njsondb_dir = os.path.join(root_dir, 'jsondb')\nsys.path.append(jsondb_dir)\n",
"step-4": "import sys\nimport os.path\nroot_dir = os.path.dirname(os.path.dirname(__file__))\njsondb_dir = os.path.join(root_dir, 'jsondb')\nsys.path.append(jsondb_dir)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from pages.blog.models import Blog
admin.site.register(Blog)
|
normal
|
{
"blob_id": "534aaf8371707089522af014a93f3ff6c4f913ff",
"index": 8510,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Blog)\n",
"step-3": "from django.contrib import admin\nfrom pages.blog.models import Blog\nadmin.site.register(Blog)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import cherrypy
import config
try:
from simplejson import json
except ImportError:
import json
import routes
import urllib
import re
def redirect(url, status=None):
"""Raise a redirect to the specified address.
"""
raise cherrypy.HTTPRedirect(url, status)
def require_method(*allowed_methods):
allowed_methods = list(allowed_methods)
if "GET" in allowed_methods:
if "HEAD" not in allowed_methods:
allowed_methods.append("HEAD")
allowed_methods.sort()
if cherrypy.request.method not in allowed_methods:
cherrypy.response.headers['Allow'] = ", ".join(allowed_methods)
raise cherrypy.HTTPError(405)
def gonext():
"""Redirect to the url specified by the "next" parameter, if there is one.
"""
next = cherrypy.request.params.get('next', '')
if next != '':
redirect(next)
def url(*args, **kwargs):
"""Get the url for a given route.
"""
if len(args) == 0 and len(kwargs) == 0:
return cherrypy.url()
# First read the old args
newkwargs = dict(
(k, v[3:]) for (k, v) in kwargs.iteritems()
if v is not None and k.startswith('old')
)
# Apply neither new nor old args
for (k, v) in kwargs.iteritems():
if k.startswith('new') or k.startswith('old'):
continue
if v is None:
try:
del newkwargs[k]
except KeyError: pass
else:
newkwargs[k] = v
# Apply new args
for (k, v) in kwargs.iteritems():
if k[:3] != 'new':
continue
k = k[3:]
if v is None:
try:
del newkwargs[k]
except KeyError: pass
else:
newkwargs[k] = v
if len(args) > 0 and args[0] == 'static':
return config.STATIC_ASSETS_URL + '/'.join(args[1:])
return cherrypy.url(routes.url_for(*args, **newkwargs))
def queryparams(*args, **kwargs):
"""Encode a set of arguments as query parameters.
"""
args = dict(args)
args.update(kwargs)
return urllib.urlencode(args)
def get_or_404(cls, id):
try:
return cls.objects.get(unicode(id))
except KeyError:
raise cherrypy.NotFound
def locked(fn):
"""Decorator to ensure that the mutex is locked while calling a method.
The method's object must have a mutex in a property named "mutex".
"""
def locked_method(self, *args, **kwargs):
self.mutex.acquire()
try:
return fn(self, *args, **kwargs)
finally:
self.mutex.release()
return locked_method
def get_user():
from apps.store.models import User
try:
user = User.objects.get(u'_')
except KeyError:
user = User(None)
user.id = u'_'
User.objects.set(user)
return user
def get_settings():
from apps.store.models import Settings
try:
settings = Settings.objects.get(u'_')
except KeyError:
settings = Settings(None)
settings.id = u'_'
settings.set_roots(config.default_media_roots)
Settings.objects.set(settings)
return settings
def listify(val):
"""Convert a value, as found in cherrypy parameters, into a list.
"""
if isinstance(val, basestring):
return [val]
if hasattr(val, '__iter__'):
return list(val)
return [val]
def listify_values(params):
"""Return a copy of a dict with values which were strings converted to
lists.
"""
return dict((k, listify(v)) for (k, v) in params.iteritems())
def getparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single item, or None if not
supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0: return v[0]
return default
def getintparam(name, default=None, stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
The query parameter is always returned as a single integer item, or None if
not supplied. If supplied multiple times, one of the values is returned.
"""
v = getparamlist(name, stash=stash, params=params)
if len(v) > 0: return int(v[0])
return default
def getparamlist(name, default=[], stash=None, params=None):
"""Get a query parameter, in a nice standardised way, with some special
handling for old and new values.
Returns a list of values.
"""
if params is None:
params = cherrypy.request.params
v = params.get("new" + name, None)
if v is None:
v = params.get(name, None)
if v is None:
v = params.get("old" + name, None)
if v is None:
return default
v = listify(v)
if stash is not None:
stash[str(name)] = v
return v
def getorderparam(name):
"""Get the sequence of numbers stored in a parameter.
The parameter should contain the numbers separated by commas.
If invalid entries are found, raises an HTTP 400 error.
"""
for num in cherrypy.request.params.get(name, '').split(','):
if num.strip() == '':
continue
try:
yield int(num)
except ValueError:
raise cherrypy.HTTPError(400)
def jsonresp(value):
"""Return a json formatted value, and set appropriate headers.
"""
body = (json.dumps(value),)
cherrypy.response.headers['Content-Type'] = 'application/json'
return body
def slugify(value):
    """Normalise `value` into an ASCII slug.

    Accented characters are decomposed and stripped to ASCII, characters
    other than word characters, whitespace, dots and dashes become "_",
    the result is lowercased, and runs of whitespace/dashes collapse to a
    single "-".

    """
    import unicodedata
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # Raw strings for the patterns: '\w' and '\s' are invalid escape
    # sequences in plain string literals (SyntaxWarning in modern Python).
    value = unicode(re.sub(r'[^\w\s\.-]', '_', value).strip().lower())
    return re.sub(r'[-\s]+', '-', value)
|
normal
|
{
"blob_id": "dc28d8aa17347f07041ae218bbe4e1b0add27c24",
"index": 5669,
"step-1": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\n<mask token>\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\n<mask token>\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef 
get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\n<mask token>\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\n<mask token>\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n 
cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-3": "<mask token>\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\n<mask token>\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef 
get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for k, v in params.iteritems())\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return int(v[0])\n return default\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-4": "import cherrypy\nimport config\ntry:\n from simplejson import json\nexcept ImportError:\n import json\nimport routes\nimport urllib\nimport re\n\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if 'GET' in allowed_methods:\n if 'HEAD' not in allowed_methods:\n allowed_methods.append('HEAD')\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = ', '.join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n newkwargs = dict((k, v[3:]) for k, v in kwargs.iteritems() if v is not\n None and k.startswith('old'))\n for k, v in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n for k, v in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError:\n pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\n\ndef queryparams(*args, **kwargs):\n \"\"\"Encode a set of arguments as query parameters.\n\n \"\"\"\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)\n\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise cherrypy.NotFound\n\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked 
while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for k, v in params.iteritems())\n\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return v[0]\n return default\n\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0:\n return int(v[0])\n return default\n\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get('new' + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get('old' + name, None)\n if v is None:\n return default\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = json.dumps(value),\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\\\w\\\\s\\\\.-]', '_', value).strip().lower())\n return re.sub('[-\\\\s]+', '-', value)\n",
"step-5": "import cherrypy\nimport config\ntry:\n from simplejson import json\nexcept ImportError:\n import json\nimport routes\nimport urllib\nimport re\n\ndef redirect(url, status=None):\n \"\"\"Raise a redirect to the specified address.\n\n \"\"\"\n raise cherrypy.HTTPRedirect(url, status)\n\ndef require_method(*allowed_methods):\n allowed_methods = list(allowed_methods)\n if \"GET\" in allowed_methods:\n if \"HEAD\" not in allowed_methods:\n allowed_methods.append(\"HEAD\")\n allowed_methods.sort()\n if cherrypy.request.method not in allowed_methods:\n cherrypy.response.headers['Allow'] = \", \".join(allowed_methods)\n raise cherrypy.HTTPError(405)\n\ndef gonext():\n \"\"\"Redirect to the url specified by the \"next\" parameter, if there is one.\n\n \"\"\"\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)\n\ndef url(*args, **kwargs):\n \"\"\"Get the url for a given route.\n\n \"\"\"\n if len(args) == 0 and len(kwargs) == 0:\n return cherrypy.url()\n # First read the old args\n newkwargs = dict(\n (k, v[3:]) for (k, v) in kwargs.iteritems()\n if v is not None and k.startswith('old')\n )\n # Apply neither new nor old args\n for (k, v) in kwargs.iteritems():\n if k.startswith('new') or k.startswith('old'):\n continue\n if v is None:\n try:\n del newkwargs[k]\n except KeyError: pass\n else:\n newkwargs[k] = v\n # Apply new args\n for (k, v) in kwargs.iteritems():\n if k[:3] != 'new':\n continue\n k = k[3:]\n if v is None:\n try:\n del newkwargs[k]\n except KeyError: pass\n else:\n newkwargs[k] = v\n if len(args) > 0 and args[0] == 'static':\n return config.STATIC_ASSETS_URL + '/'.join(args[1:])\n return cherrypy.url(routes.url_for(*args, **newkwargs))\n\ndef queryparams(*args, **kwargs):\n \"\"\"Encode a set of arguments as query parameters.\n\n \"\"\"\n args = dict(args)\n args.update(kwargs)\n return urllib.urlencode(args)\n\ndef get_or_404(cls, id):\n try:\n return cls.objects.get(unicode(id))\n except KeyError:\n raise 
cherrypy.NotFound\n\ndef locked(fn):\n \"\"\"Decorator to ensure that the mutex is locked while calling a method.\n\n The method's object must have a mutex in a property named \"mutex\".\n\n \"\"\"\n def locked_method(self, *args, **kwargs):\n self.mutex.acquire()\n try:\n return fn(self, *args, **kwargs)\n finally:\n self.mutex.release()\n return locked_method\n\ndef get_user():\n from apps.store.models import User\n try:\n user = User.objects.get(u'_')\n except KeyError:\n user = User(None)\n user.id = u'_'\n User.objects.set(user)\n return user\n\ndef get_settings():\n from apps.store.models import Settings\n try:\n settings = Settings.objects.get(u'_')\n except KeyError:\n settings = Settings(None)\n settings.id = u'_'\n settings.set_roots(config.default_media_roots)\n Settings.objects.set(settings)\n return settings\n\ndef listify(val):\n \"\"\"Convert a value, as found in cherrypy parameters, into a list.\n\n \"\"\"\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]\n\ndef listify_values(params):\n \"\"\"Return a copy of a dict with values which were strings converted to\n lists.\n\n \"\"\"\n return dict((k, listify(v)) for (k, v) in params.iteritems())\n\ndef getparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single item, or None if not\n supplied. If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return v[0]\n return default\n\ndef getintparam(name, default=None, stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n The query parameter is always returned as a single integer item, or None if\n not supplied. 
If supplied multiple times, one of the values is returned.\n\n \"\"\"\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return int(v[0])\n return default\n\ndef getparamlist(name, default=[], stash=None, params=None):\n \"\"\"Get a query parameter, in a nice standardised way, with some special\n handling for old and new values.\n\n Returns a list of values.\n\n \"\"\"\n if params is None:\n params = cherrypy.request.params\n v = params.get(\"new\" + name, None)\n if v is None:\n v = params.get(name, None)\n if v is None:\n v = params.get(\"old\" + name, None)\n\n if v is None:\n return default\n\n v = listify(v)\n if stash is not None:\n stash[str(name)] = v\n return v\n\ndef getorderparam(name):\n \"\"\"Get the sequence of numbers stored in a parameter.\n\n The parameter should contain the numbers separated by commas.\n If invalid entries are found, raises an HTTP 400 error.\n\n \"\"\"\n for num in cherrypy.request.params.get(name, '').split(','):\n if num.strip() == '':\n continue\n try:\n yield int(num)\n except ValueError:\n raise cherrypy.HTTPError(400)\n\ndef jsonresp(value):\n \"\"\"Return a json formatted value, and set appropriate headers.\n\n \"\"\"\n body = (json.dumps(value),)\n cherrypy.response.headers['Content-Type'] = 'application/json'\n return body\n\ndef slugify(value):\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s\\.-]', '_', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)\n",
"step-ids": [
11,
13,
16,
19,
20
]
}
|
[
11,
13,
16,
19,
20
] |
class Type(object):
    """Type of values.

    Thin wrapper delegating string rendering to a `jtype` object —
    presumably a Java-side type handle exposing toString() and
    toPrettyString() (TODO confirm against the jtype interface).
    """

    def __init__(self, jtype):
        # Underlying type object; kept as a public attribute.
        self.jtype = jtype

    def __repr__(self):
        # Debug form: the wrapped type's own toString() rendering.
        rendered = self.jtype.toString()
        return rendered

    def __str__(self):
        # Human-readable form, with both formatting flags disabled.
        pretty = self.jtype.toPrettyString(False, False)
        return pretty
|
normal
|
{
"blob_id": "851162e6c40a9f4f82a983a84fd0b4d6a6a57412",
"index": 3675,
"step-1": "class Type(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.jtype.toPrettyString(False, False)\n",
"step-2": "class Type(object):\n <mask token>\n\n def __init__(self, jtype):\n self.jtype = jtype\n <mask token>\n\n def __str__(self):\n return self.jtype.toPrettyString(False, False)\n",
"step-3": "class Type(object):\n <mask token>\n\n def __init__(self, jtype):\n self.jtype = jtype\n\n def __repr__(self):\n return self.jtype.toString()\n\n def __str__(self):\n return self.jtype.toPrettyString(False, False)\n",
"step-4": "class Type(object):\n \"\"\"Type of values.\"\"\"\n\n def __init__(self, jtype):\n self.jtype = jtype\n\n def __repr__(self):\n return self.jtype.toString()\n\n def __str__(self):\n return self.jtype.toPrettyString(False, False)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_parent(parent, x):
if parent[x] == x:
return x
parent[x] = get_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = get_parent(parent, a)
b = get_parent(parent, b)
if a != b:
parent[b] = a
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(V + 1):
node.append(i)
for _ in range(E):
graphs.append(list(map(int, sys.stdin.readline().split())))
<|reserved_special_token_0|>
def get_parent(parent, x):
if parent[x] == x:
return x
parent[x] = get_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = get_parent(parent, a)
b = get_parent(parent, b)
if a != b:
parent[b] = a
<|reserved_special_token_0|>
while N < V - 1:
A, B, dist = graph[idx]
if get_parent(node, A) == get_parent(node, B):
idx += 1
continue
union_parent(node, A, B)
distance += dist
N += 1
idx += 1
print(distance)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
V, E = map(int, sys.stdin.readline().split())
node = []
graphs = []
for i in range(V + 1):
node.append(i)
for _ in range(E):
graphs.append(list(map(int, sys.stdin.readline().split())))
graph = sorted(graphs, key=lambda x: x[2])
def get_parent(parent, x):
if parent[x] == x:
return x
parent[x] = get_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = get_parent(parent, a)
b = get_parent(parent, b)
if a != b:
parent[b] = a
N = 0
distance = 0
idx = 0
while N < V - 1:
A, B, dist = graph[idx]
if get_parent(node, A) == get_parent(node, B):
idx += 1
continue
union_parent(node, A, B)
distance += dist
N += 1
idx += 1
print(distance)
<|reserved_special_token_1|>
import sys
V, E = map(int, sys.stdin.readline().split())
node = []
graphs = []
for i in range(V + 1):
node.append(i)
for _ in range(E):
graphs.append(list(map(int, sys.stdin.readline().split())))
graph = sorted(graphs, key=lambda x: x[2])
def get_parent(parent, x):
if parent[x] == x:
return x
parent[x] = get_parent(parent, parent[x])
return parent[x]
def union_parent(parent, a, b):
a = get_parent(parent, a)
b = get_parent(parent, b)
if a != b:
parent[b] = a
N = 0
distance = 0
idx = 0
while N < V - 1:
A, B, dist = graph[idx]
if get_parent(node, A) == get_parent(node, B):
idx += 1
continue
union_parent(node, A, B)
distance += dist
N += 1
idx += 1
print(distance)
<|reserved_special_token_1|>
import sys

# Kruskal's minimum spanning tree.
# stdin: first line "V E" (vertex and edge counts), then E lines "A B dist".
# stdout: the total weight of the MST.
V, E = map(int, sys.stdin.readline().split())

# Union-find parent table; parent[i] == i marks i as a set root.
node = list(range(V + 1))

graphs = []
for _ in range(E):
    graphs.append(list(map(int, sys.stdin.readline().split())))

# Sort edges by weight so the cheapest usable edge is always tried next.
graph = sorted(graphs, key=lambda x: x[2])


def get_parent(parent, x):
    """Return the root of x's set, compressing the path iteratively.

    The iterative two-pass form avoids RecursionError on long parent
    chains, which the recursive version risked for large inputs.
    """
    root = x
    while parent[root] != root:
        root = parent[root]
    # Second pass: point every node on the walked path directly at root.
    while parent[x] != root:
        parent[x], x = root, parent[x]
    return root


def union_parent(parent, a, b):
    """Merge the sets containing a and b (b's root is attached to a's)."""
    a = get_parent(parent, a)
    b = get_parent(parent, b)
    if a != b:
        parent[b] = a


N = 0          # edges accepted into the tree so far
distance = 0   # accumulated MST weight
idx = 0
# A spanning tree over V vertices needs exactly V-1 edges.
while N < V - 1:
    A, B, dist = graph[idx]
    idx += 1
    # Skip edges whose endpoints are already connected (would form a cycle).
    if get_parent(node, A) == get_parent(node, B):
        continue
    union_parent(node, A, B)
    distance += dist
    N += 1

print(distance)
|
flexible
|
{
"blob_id": "2e794e281c6f34858cd32725cdc454eb18c28892",
"index": 3415,
"step-1": "<mask token>\n\n\ndef get_parent(parent, x):\n if parent[x] == x:\n return x\n parent[x] = get_parent(parent, parent[x])\n return parent[x]\n\n\ndef union_parent(parent, a, b):\n a = get_parent(parent, a)\n b = get_parent(parent, b)\n if a != b:\n parent[b] = a\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor i in range(V + 1):\n node.append(i)\nfor _ in range(E):\n graphs.append(list(map(int, sys.stdin.readline().split())))\n<mask token>\n\n\ndef get_parent(parent, x):\n if parent[x] == x:\n return x\n parent[x] = get_parent(parent, parent[x])\n return parent[x]\n\n\ndef union_parent(parent, a, b):\n a = get_parent(parent, a)\n b = get_parent(parent, b)\n if a != b:\n parent[b] = a\n\n\n<mask token>\nwhile N < V - 1:\n A, B, dist = graph[idx]\n if get_parent(node, A) == get_parent(node, B):\n idx += 1\n continue\n union_parent(node, A, B)\n distance += dist\n N += 1\n idx += 1\nprint(distance)\n",
"step-3": "<mask token>\nV, E = map(int, sys.stdin.readline().split())\nnode = []\ngraphs = []\nfor i in range(V + 1):\n node.append(i)\nfor _ in range(E):\n graphs.append(list(map(int, sys.stdin.readline().split())))\ngraph = sorted(graphs, key=lambda x: x[2])\n\n\ndef get_parent(parent, x):\n if parent[x] == x:\n return x\n parent[x] = get_parent(parent, parent[x])\n return parent[x]\n\n\ndef union_parent(parent, a, b):\n a = get_parent(parent, a)\n b = get_parent(parent, b)\n if a != b:\n parent[b] = a\n\n\nN = 0\ndistance = 0\nidx = 0\nwhile N < V - 1:\n A, B, dist = graph[idx]\n if get_parent(node, A) == get_parent(node, B):\n idx += 1\n continue\n union_parent(node, A, B)\n distance += dist\n N += 1\n idx += 1\nprint(distance)\n",
"step-4": "import sys\nV, E = map(int, sys.stdin.readline().split())\nnode = []\ngraphs = []\nfor i in range(V + 1):\n node.append(i)\nfor _ in range(E):\n graphs.append(list(map(int, sys.stdin.readline().split())))\ngraph = sorted(graphs, key=lambda x: x[2])\n\n\ndef get_parent(parent, x):\n if parent[x] == x:\n return x\n parent[x] = get_parent(parent, parent[x])\n return parent[x]\n\n\ndef union_parent(parent, a, b):\n a = get_parent(parent, a)\n b = get_parent(parent, b)\n if a != b:\n parent[b] = a\n\n\nN = 0\ndistance = 0\nidx = 0\nwhile N < V - 1:\n A, B, dist = graph[idx]\n if get_parent(node, A) == get_parent(node, B):\n idx += 1\n continue\n union_parent(node, A, B)\n distance += dist\n N += 1\n idx += 1\nprint(distance)\n",
"step-5": "import sys\n\nV, E = map(int, sys.stdin.readline().split())\n\nnode = []\ngraphs = []\nfor i in range(V+1):\n node.append(i)\n\nfor _ in range(E):\n graphs.append((list(map(int, sys.stdin.readline().split()))))\n\ngraph = sorted(graphs, key=lambda x: x[2])\n\n\ndef get_parent(parent, x):\n if parent[x] == x:\n return x\n parent[x] = get_parent(parent, parent[x])\n return parent[x]\n\n\ndef union_parent(parent, a, b):\n a = get_parent(parent, a)\n b = get_parent(parent, b)\n if a != b:\n parent[b] = a\n\n\nN = 0\ndistance = 0\nidx = 0\nwhile N < V-1:\n A, B, dist = graph[idx]\n if get_parent(node, A) == get_parent(node, B):\n idx += 1\n continue\n\n union_parent(node, A, B)\n distance += dist\n N += 1\n idx += 1\n\nprint(distance)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Bot(dict):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Bot(dict):
def __init__(self):
self['getRayon'] = 0
self['getPosition'] = -1000, -1000
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self['getRayon']
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Bot(dict):
def __init__(self):
self['getRayon'] = 0
self['getPosition'] = -1000, -1000
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self['getRayon']
def getPosition(self):
return self['getPosition']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Bot(dict):
def __init__(self):
self['getRayon'] = 0
self['getPosition'] = -1000, -1000
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self['getRayon']
def getPosition(self):
return self['getPosition']
if __name__ == '__main__':
import sys
import os
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(FILE_DIR, '../../ia'))
sys.path.append(os.path.join(FILE_DIR, '../../libs'))
import time
from graphview import GraphView
from event.goals import navigation
from event import collision
filename = os.path.join(FILE_DIR, '../../ia/event/goals/navigation/map.xml'
)
try:
offset = sys.argv[1]
except:
offset = 0
start = time.time()
other_bot = Bot()
other_bot.name = 'other'
other_bot['getRayon'] = 200
used_bot = Bot()
used_bot.name = 'used'
used_bot['getRayon'] = 120
ennemy1 = Bot()
ennemy1.name = 'en1'
ennemy2 = Bot()
ennemy2.name = 'en2'
ennemy1['getPosition'] = 1800, 1500
ennemy1['getRayon'] = 200
ennemy2['getPosition'] = 2200, 500
ennemy1['getRayon'] = 120
ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2],
filename)
col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])
print('init time : %s' % (time.time() - start))
v = GraphView(ng, col, other_bot, used_bot)
v.mainloop()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
class Bot(dict):
def __init__(self):
self["getRayon"] = 0
self["getPosition"] = (-1000, -1000)
self.traj = []
def getTrajectoires(self):
return self.traj
def getRayon(self):
return self["getRayon"]
def getPosition(self):
return self["getPosition"]
if __name__ == "__main__":
import sys
import os
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(FILE_DIR, "../../ia"))
sys.path.append(os.path.join(FILE_DIR, "../../libs"))
import time
from graphview import GraphView
from event.goals import navigation
from event import collision
filename = os.path.join(FILE_DIR, "../../ia/event/goals/navigation/map.xml")
try:
offset = sys.argv[1]
except:
offset = 0
start = time.time()
other_bot = Bot()
other_bot.name = 'other'
other_bot["getRayon"] = 200
used_bot = Bot()
used_bot.name = 'used'
used_bot["getRayon"] = 120
ennemy1 = Bot()
ennemy1.name = 'en1'
ennemy2 = Bot()
ennemy2.name = 'en2'
ennemy1["getPosition"] = (1800, 1500)
ennemy1["getRayon"] = 200
ennemy2["getPosition"] = (2200, 500)
ennemy1["getRayon"] = 120
ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)
col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])
print("init time : %s" % (time.time() - start))
v = GraphView(ng, col, other_bot, used_bot)
v.mainloop()
|
flexible
|
{
"blob_id": "d178818faf5fb18f5da48c1e2cf7991600731d06",
"index": 4457,
"step-1": "class Bot(dict):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\n<mask token>\n",
"step-4": "class Bot(dict):\n\n def __init__(self):\n self['getRayon'] = 0\n self['getPosition'] = -1000, -1000\n self.traj = []\n\n def getTrajectoires(self):\n return self.traj\n\n def getRayon(self):\n return self['getRayon']\n\n def getPosition(self):\n return self['getPosition']\n\n\nif __name__ == '__main__':\n import sys\n import os\n FILE_DIR = os.path.dirname(os.path.abspath(__file__))\n sys.path.append(os.path.join(FILE_DIR, '../../ia'))\n sys.path.append(os.path.join(FILE_DIR, '../../libs'))\n import time\n from graphview import GraphView\n from event.goals import navigation\n from event import collision\n filename = os.path.join(FILE_DIR, '../../ia/event/goals/navigation/map.xml'\n )\n try:\n offset = sys.argv[1]\n except:\n offset = 0\n start = time.time()\n other_bot = Bot()\n other_bot.name = 'other'\n other_bot['getRayon'] = 200\n used_bot = Bot()\n used_bot.name = 'used'\n used_bot['getRayon'] = 120\n ennemy1 = Bot()\n ennemy1.name = 'en1'\n ennemy2 = Bot()\n ennemy2.name = 'en2'\n ennemy1['getPosition'] = 1800, 1500\n ennemy1['getRayon'] = 200\n ennemy2['getPosition'] = 2200, 500\n ennemy1['getRayon'] = 120\n ng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2],\n filename)\n col = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n print('init time : %s' % (time.time() - start))\n v = GraphView(ng, col, other_bot, used_bot)\n v.mainloop()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\nclass Bot(dict):\n\tdef __init__(self):\n\t\tself[\"getRayon\"] = 0\n\t\tself[\"getPosition\"] = (-1000, -1000)\n\t\tself.traj = []\n\tdef getTrajectoires(self):\n\t\treturn self.traj\n\tdef getRayon(self):\n\t\treturn self[\"getRayon\"]\n\tdef getPosition(self):\n\t\treturn self[\"getPosition\"]\n\nif __name__ == \"__main__\":\n\timport sys\n\timport os\n\tFILE_DIR = os.path.dirname(os.path.abspath(__file__))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../ia\"))\n\tsys.path.append(os.path.join(FILE_DIR, \"../../libs\"))\n\t\n\timport time\n\t\n\tfrom graphview import GraphView\n\tfrom event.goals import navigation\n\tfrom event import collision\n\t\n\tfilename = os.path.join(FILE_DIR, \"../../ia/event/goals/navigation/map.xml\")\n\ttry:\n\t\toffset = sys.argv[1]\n\texcept:\n\t\toffset = 0\n\tstart = time.time()\n\tother_bot = Bot()\n\tother_bot.name = 'other'\n\tother_bot[\"getRayon\"] = 200\n\tused_bot = Bot()\n\tused_bot.name = 'used'\n\tused_bot[\"getRayon\"] = 120\n\tennemy1 = Bot()\n\tennemy1.name = 'en1'\n\tennemy2 = Bot()\n\tennemy2.name = 'en2'\n\tennemy1[\"getPosition\"] = (1800, 1500)\n\tennemy1[\"getRayon\"] = 200\n\tennemy2[\"getPosition\"] = (2200, 500)\n\tennemy1[\"getRayon\"] = 120\n\tng = navigation.PathFinding([used_bot, other_bot, ennemy1, ennemy2], filename)\n\tcol = collision.Collision([used_bot, other_bot, ennemy1, ennemy2])\n\tprint(\"init time : %s\" % (time.time() - start))\n\t\n\tv = GraphView(ng, col, other_bot, used_bot)\n\tv.mainloop()\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
from .slinklist import SingleLinkedList
|
normal
|
{
"blob_id": "2a5d498a386190bdd2c05bc2b14db0fecd707162",
"index": 1128,
"step-1": "<mask token>\n",
"step-2": "from .slinklist import SingleLinkedList\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(User, related_name='project_ts_member',
limit_choices_to={'is_staff': True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
class ProjectTS(models.Model):
class Meta:
permissions = ('approve_project_ts', 'Can approve timesheet'),
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(User, related_name='project_ts_member',
limit_choices_to={'is_staff': True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default='')
project_time_sheet = models.ForeignKey(ProjectTS, related_name=
'project_time_sheet')
project_leader = models.ForeignKey(User, related_name='pl',
limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default='')
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils import timezone
from timesheets.models import TimeSheet
from channels import Group
class ProjectTS(models.Model):
class Meta:
permissions = (
("approve_project_ts", "Can approve timesheet"),
)
pay_period_begin = models.DateField()
pay_period_end = models.DateField()
ambassador = models.ForeignKey(
User, related_name='project_ts_member',
limit_choices_to={'is_staff' : True})
ambassador_finalized = models.BooleanField(default=False)
final_approval = models.BooleanField(default=False)
date_submitted = models.DateTimeField(auto_now_add=True)
date_approved = models.DateTimeField(auto_now_add=True)
class ProjectTSEntry(models.Model):
description = models.CharField(max_length=150, default="")
project_time_sheet = models.ForeignKey(ProjectTS, related_name="project_time_sheet")
project_leader = models.ForeignKey(User, related_name="pl",
limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})
project_leader_verification = models.BooleanField(default=False)
title = models.CharField(max_length=16, default="")
total_time = models.IntegerField(default=0)
start_time = models.TimeField(default=timezone.now)
end_time = models.TimeField(default=timezone.now)
day = models.DateField(default=timezone.now)
def save(self, *args, **kwargs):
self.total_time = self.end_time.hour - self.start_time.hour
result = super(ProjectTSEntry, self).save(*args, **kwargs)
return result
|
flexible
|
{
"blob_id": "df39a97db25f03aca8ebd501283fd6a7c486db8c",
"index": 1243,
"step-1": "<mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-2": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-3": "<mask token>\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n\n\n class Meta:\n permissions = ('approve_project_ts', 'Can approve timesheet'),\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(User, related_name='project_ts_member',\n limit_choices_to={'is_staff': True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default='')\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\n 'project_time_sheet')\n project_leader = models.ForeignKey(User, related_name='pl',\n limit_choices_to={'is_staff': True, 'groups__name': 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default='')\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-5": "from __future__ import unicode_literals\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.utils import timezone\nfrom timesheets.models import TimeSheet\nfrom channels import Group\n\n\nclass ProjectTS(models.Model):\n class Meta:\n permissions = (\n (\"approve_project_ts\", \"Can approve timesheet\"),\n )\n\n pay_period_begin = models.DateField()\n pay_period_end = models.DateField()\n ambassador = models.ForeignKey(\n User, related_name='project_ts_member',\n limit_choices_to={'is_staff' : True})\n ambassador_finalized = models.BooleanField(default=False)\n final_approval = models.BooleanField(default=False)\n date_submitted = models.DateTimeField(auto_now_add=True)\n date_approved = models.DateTimeField(auto_now_add=True)\n\n\nclass ProjectTSEntry(models.Model):\n description = models.CharField(max_length=150, default=\"\")\n project_time_sheet = models.ForeignKey(ProjectTS, related_name=\"project_time_sheet\")\n project_leader = models.ForeignKey(User, related_name=\"pl\",\n limit_choices_to={'is_staff' : True, 'groups__name' : 'Team Leader'})\n project_leader_verification = models.BooleanField(default=False)\n title = models.CharField(max_length=16, default=\"\")\n total_time = models.IntegerField(default=0)\n start_time = models.TimeField(default=timezone.now)\n end_time = models.TimeField(default=timezone.now)\n day = models.DateField(default=timezone.now)\n\n def save(self, *args, **kwargs):\n self.total_time = self.end_time.hour - self.start_time.hour\n result = super(ProjectTSEntry, self).save(*args, **kwargs)\n return result\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import turtle
pen = turtle.Turtle()
def curve():
for i in range(200):
pen.right(1)
pen.forward(1)
def heart():
pen.fillcolor('yellow')
pen.begin_fill()
pen.left(140)
pen.forward(113)
curve()
pen.left(120)
curve()
pen.forward(112)
pen.end_fill()
heart()
|
normal
|
{
"blob_id": "fa925d0ef4f9df3fdf9a51c7fcc88933609bc9e3",
"index": 3980,
"step-1": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-3": "<mask token>\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-4": "import turtle\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'fs': {'eci': {'info': {'name': 'Example closed Interface extension',
'version': '1.0', 'date': 'Sept. 22, 2016', 'author': 'Jeff Teeters',
'contact': 'jteeters@berkeley.edu', 'description':
'Extension defining a new closed Interface'}, 'schema': {
'MyClosedInterface/': {'merge': ['core:<Interface>/'], 'description':
'A new interface defined in extension e-closed-interface.py. This is closed (no new members can be added).'
, '_properties': {'closed': True}, 'attributes': {'foo': {'description':
'example text attributed for MyClosedInterface', 'data_type': 'text'}},
'bar': {'description':
'Example dataset included with MyClosedInterface', 'data_type': 'int',
'dimensions': ['num_measurements']}, 'bazc/': {'description':
'Example closed group in MyClosedInterface', '_properties': {'closed':
True}}, 'bazo/': {'description':
'Example open group in MyClosedInterface', '_properties': {'closed':
False}}}}}}}
<|reserved_special_token_1|>
# This defines a new interface, called MyClosedInterface
# which is closed (does not allow new members to be added).
# "eci" is the schema id for this extension.
{"fs": { "eci": {
"info": {
"name": "Example closed Interface extension",
"version": "1.0",
"date": "Sept. 22, 2016",
"author": "Jeff Teeters",
"contact": "jteeters@berkeley.edu",
"description": ("Extension defining a new closed Interface")
},
"schema": {
"MyClosedInterface/": {
"merge": ["core:<Interface>/"],
"description": ("A new interface defined in extension e-closed-interface.py."
" This is closed (no new members can be added)."),
"_properties": {"closed": True}, # specify that this group is closed (no new members can be added).
"attributes": {
"foo": {
"description": "example text attributed for MyClosedInterface",
"data_type": "text"}},
"bar": {
"description": ("Example dataset included with MyClosedInterface"),
"data_type": "int",
"dimensions": ["num_measurements"]},
"bazc/": {
"description": ("Example closed group in MyClosedInterface"),
# "_closed": True,
"_properties": {"closed": True}},
"bazo/": {
"description": ("Example open group in MyClosedInterface"),
# "_closed": False,
"_properties": {"closed": False}}
}
}
}}}
|
flexible
|
{
"blob_id": "892f90edbd8bd54841b815a6bc29d136c5e84a38",
"index": 7175,
"step-1": "<mask token>\n",
"step-2": "{'fs': {'eci': {'info': {'name': 'Example closed Interface extension',\n 'version': '1.0', 'date': 'Sept. 22, 2016', 'author': 'Jeff Teeters',\n 'contact': 'jteeters@berkeley.edu', 'description':\n 'Extension defining a new closed Interface'}, 'schema': {\n 'MyClosedInterface/': {'merge': ['core:<Interface>/'], 'description':\n 'A new interface defined in extension e-closed-interface.py. This is closed (no new members can be added).'\n , '_properties': {'closed': True}, 'attributes': {'foo': {'description':\n 'example text attributed for MyClosedInterface', 'data_type': 'text'}},\n 'bar': {'description':\n 'Example dataset included with MyClosedInterface', 'data_type': 'int',\n 'dimensions': ['num_measurements']}, 'bazc/': {'description':\n 'Example closed group in MyClosedInterface', '_properties': {'closed': \n True}}, 'bazo/': {'description':\n 'Example open group in MyClosedInterface', '_properties': {'closed': \n False}}}}}}}\n",
"step-3": "# This defines a new interface, called MyClosedInterface\n# which is closed (does not allow new members to be added).\n\n# \"eci\" is the schema id for this extension.\n\n{\"fs\": { \"eci\": {\n\n\"info\": {\n \"name\": \"Example closed Interface extension\",\n \"version\": \"1.0\",\n \"date\": \"Sept. 22, 2016\",\n \"author\": \"Jeff Teeters\",\n \"contact\": \"jteeters@berkeley.edu\",\n \"description\": (\"Extension defining a new closed Interface\")\n },\n \n\"schema\": {\n \"MyClosedInterface/\": {\n \"merge\": [\"core:<Interface>/\"],\n \"description\": (\"A new interface defined in extension e-closed-interface.py.\"\n \" This is closed (no new members can be added).\"),\n \"_properties\": {\"closed\": True}, # specify that this group is closed (no new members can be added).\n \"attributes\": {\n \"foo\": {\n \"description\": \"example text attributed for MyClosedInterface\",\n \"data_type\": \"text\"}},\n \"bar\": {\n \"description\": (\"Example dataset included with MyClosedInterface\"),\n \"data_type\": \"int\",\n \"dimensions\": [\"num_measurements\"]},\n \"bazc/\": {\n \"description\": (\"Example closed group in MyClosedInterface\"),\n # \"_closed\": True,\n \"_properties\": {\"closed\": True}},\n \"bazo/\": {\n \"description\": (\"Example open group in MyClosedInterface\"),\n # \"_closed\": False,\n \"_properties\": {\"closed\": False}}\n }\n}\n\n}}}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
dansfunctions - various useful functions in python
usage:
>>import dansfunctions
>>dansfunctions.fg # module of general mathematical, vector and string format functions
>>dansfunctions.fp # module of matplotlib shortcuts
>>dansfunctions.widgets # module of tkinter shortcuts
Requirements: numpy
Optional requirements: matplotlib, tkinter
"""
from . import functions_general as fg
try:
import matplotlib
matplotlib.use('TkAgg')
from . import functions_plotting as fp
except ImportError:
fp = None
print('Matplotlib may not be available')
try:
from .tkgui import basic_widgets as widgets
except ImportError:
widgets = None
print('tkinter may not be available')
def version_info():
return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)
def module_info():
import sys
out = 'Python version %s' % sys.version
out += '\n%s' % version_info()
# Modules
out += '\n numpy version: %s' % fg.np.__version__
try:
import matplotlib
out += '\nmatplotlib version: %s' % matplotlib.__version__
except ImportError:
out += '\nmatplotlib version: None'
try:
import tkinter
out += '\n tkinter version: %s' % tkinter.TkVersion
except ImportError:
out += '\n tkinter version: None'
return out
def check_general_functions():
print('dansfunctions/functions_general.py')
print('Version: %s (%s)' % (fg.__version__, fg.__date__))
print('Methods:')
print(fg.list_methods(fg, False))
def check_plotting_functions():
print('dansfunctions/functions_plotting.py')
if fp is None:
print('Matplotlib may not be available')
return
print('Version: %s (%s)' % (fp.__version__, fp.__date__))
print('Methods:')
print(fg.list_methods(fp, False))
def check_tkinter_functions():
    """Print version information and the method listing for basic_widgets.

    If tkinter could not be imported (``widgets`` is None), only a warning
    is printed.
    """
    print('dansfunctions/tkgui/basic_widgets.py')
    if widgets is None:
        print('tkinter may not be available')
    else:
        version_line = 'Version: %s (%s)' % (widgets.__version__, widgets.__date__)
        print(version_line)
        print('Methods:')
        print(fg.list_methods(widgets, False))
|
normal
|
{
"blob_id": "0f266db39988cfce475380036f4f4f5b1a1fee1a",
"index": 3647,
"step-1": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\n<mask token>\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n",
"step-2": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n",
"step-3": "<mask token>\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n",
"step-4": "<mask token>\nfrom . import functions_general as fg\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n",
"step-5": "\"\"\"\ndansfunctions - various useful functions in python\nusage:\n>>import dansfunctions\n>>dansfunctions.fg # module of general mathematical, vector and string format functions\n>>dansfunctions.fp # module of matplotlib shortcuts\n>>dansfunctions.widgets # module of tkinter shortcuts\n\nRequirements: numpy\nOptional requirements: matplotlib, tkinter\n\"\"\"\n\nfrom . import functions_general as fg\n\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\n\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n # Modules\n out += '\\n numpy version: %s' % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n 
print('Methods:')\n print(fg.list_methods(widgets, False))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class Virus:
    """A virus with a name, an age in days, and a malignancy level.

    Malignancy is kept within [0, 99] and increases by one every third
    day of the virus's life.
    """

    def __init__(self, _name, _age, _malignancy):
        self.name = _name
        self.age = _age
        self.malignancy = _malignancy

    def set_name(self, _name):
        """Replace the virus name."""
        self.name = _name

    def set_age(self, _age):
        """Replace the virus age."""
        self.age = _age

    def set_malignancy(self, _malignancy):
        """Replace the malignancy level."""
        self.malignancy = _malignancy

    def update(self):
        """Advance one day; every third day malignancy grows, clamped to [0, 99]."""
        self.age += 1
        if self.age % 3 == 0:
            self.malignancy += 1
        # Clamp malignancy into its valid range.
        self.malignancy = min(max(self.malignancy, 0), 99)

    def __str__(self):
        return "Nama: {}; Usia: {}; Tingkat Keganasan: {}".format(self.name, str(self.age), str(self.malignancy))
if __name__ == "__main__":
    # First line of input: <number of viruses> <number of days to simulate>.
    header = input().split()
    virus_count, day_count = int(header[0]), int(header[1])

    # One line per virus: <name> <age> <malignancy>.
    viruses = []
    for _ in range(virus_count):
        fields = input().split()
        viruses.append(Virus(fields[0], int(fields[1]), int(fields[2])))

    # Simulate day by day, printing every virus after its daily update.
    for day in range(1, day_count + 1):
        print("Hari #{}".format(str(day)))
        for virus in viruses:
            virus.update()
            print(virus)
|
normal
|
{
"blob_id": "49c3c3b8c4b097f520456736e31ac306a9f73ac7",
"index": 3544,
"step-1": "class Virus:\n\n def __init__(self, _name, _age, _malignancy):\n self.name = _name\n self.age = _age\n self.malignancy = _malignancy\n\n def set_name(self, _name):\n self.name = _name\n\n def set_age(self, _age):\n self.age = _age\n <mask token>\n\n def update(self):\n self.age += 1\n if self.age % 3 == 0:\n self.malignancy += 1\n if self.malignancy < 0:\n self.malignancy = 0\n if self.malignancy > 99:\n self.malignancy = 99\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Virus:\n\n def __init__(self, _name, _age, _malignancy):\n self.name = _name\n self.age = _age\n self.malignancy = _malignancy\n\n def set_name(self, _name):\n self.name = _name\n\n def set_age(self, _age):\n self.age = _age\n <mask token>\n\n def update(self):\n self.age += 1\n if self.age % 3 == 0:\n self.malignancy += 1\n if self.malignancy < 0:\n self.malignancy = 0\n if self.malignancy > 99:\n self.malignancy = 99\n\n def __str__(self):\n return 'Nama: {}; Usia: {}; Tingkat Keganasan: {}'.format(self.name,\n str(self.age), str(self.malignancy))\n\n\n<mask token>\n",
"step-3": "class Virus:\n\n def __init__(self, _name, _age, _malignancy):\n self.name = _name\n self.age = _age\n self.malignancy = _malignancy\n\n def set_name(self, _name):\n self.name = _name\n\n def set_age(self, _age):\n self.age = _age\n\n def set_malignancy(self, _malignancy):\n self.malignancy = _malignancy\n\n def update(self):\n self.age += 1\n if self.age % 3 == 0:\n self.malignancy += 1\n if self.malignancy < 0:\n self.malignancy = 0\n if self.malignancy > 99:\n self.malignancy = 99\n\n def __str__(self):\n return 'Nama: {}; Usia: {}; Tingkat Keganasan: {}'.format(self.name,\n str(self.age), str(self.malignancy))\n\n\n<mask token>\n",
"step-4": "class Virus:\n\n def __init__(self, _name, _age, _malignancy):\n self.name = _name\n self.age = _age\n self.malignancy = _malignancy\n\n def set_name(self, _name):\n self.name = _name\n\n def set_age(self, _age):\n self.age = _age\n\n def set_malignancy(self, _malignancy):\n self.malignancy = _malignancy\n\n def update(self):\n self.age += 1\n if self.age % 3 == 0:\n self.malignancy += 1\n if self.malignancy < 0:\n self.malignancy = 0\n if self.malignancy > 99:\n self.malignancy = 99\n\n def __str__(self):\n return 'Nama: {}; Usia: {}; Tingkat Keganasan: {}'.format(self.name,\n str(self.age), str(self.malignancy))\n\n\nif __name__ == '__main__':\n tmp = input().split()\n number_of_virus = int(tmp[0])\n number_of_day = int(tmp[1])\n viruses = []\n for index_of_virus in range(0, number_of_virus):\n tmp = input().split()\n virus_name = tmp[0]\n virus_age = int(tmp[1])\n virus_malignancy = int(tmp[2])\n tmp_virus = Virus(virus_name, virus_age, virus_malignancy)\n viruses.append(tmp_virus)\n for day in range(1, number_of_day + 1):\n print('Hari #{}'.format(str(day)))\n for index_of_virus in range(0, len(viruses)):\n viruses[index_of_virus].update()\n print(viruses[index_of_virus])\n",
"step-5": "\nclass Virus:\n def __init__(self, _name, _age, _malignancy):\n self.name = _name\n self.age = _age\n self.malignancy = _malignancy\n\n def set_name(self, _name):\n self.name = _name\n \n def set_age(self, _age):\n self.age = _age\n\n def set_malignancy(self, _malignancy):\n self.malignancy = _malignancy\n\n def update(self):\n self.age += 1\n\n if self.age % 3 == 0:\n self.malignancy += 1\n\n if self.malignancy < 0:\n self.malignancy = 0\n\n if self.malignancy > 99:\n self.malignancy = 99\n \n def __str__(self):\n return \"Nama: {}; Usia: {}; Tingkat Keganasan: {}\".format(self.name, str(self.age), str(self.malignancy))\n\nif __name__ == \"__main__\":\n tmp = input().split()\n number_of_virus = int(tmp[0])\n number_of_day = int(tmp[1])\n\n viruses = []\n for index_of_virus in range(0, number_of_virus):\n tmp = input().split()\n virus_name = tmp[0]\n virus_age = int(tmp[1])\n virus_malignancy = int(tmp[2])\n\n tmp_virus = Virus(virus_name, virus_age, virus_malignancy)\n\n viruses.append(tmp_virus)\n\n for day in range(1, number_of_day + 1):\n print(\"Hari #{}\".format(str(day)))\n\n for index_of_virus in range(0, len(viruses)):\n viruses[index_of_virus].update()\n \n print(viruses[index_of_virus])\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# Copyright (c) 2019 Jannika Lossner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR
class MultiSpeakerBRIR(SimpleFreeFieldHRIR):
    """SOFA-style convention for multi-emitter binaural room impulse responses."""

    name = "MultiSpeakerBRIR"
    version = "0.3"

    def __init__(self):
        super().__init__()
        # Binaural recordings: exactly two receivers (left and right ear).
        self.default_objects["Receiver"]["count"] = 2

        def two_receivers(name, fixed, variances, count):
            # Only constrains the Receiver object.
            return name != "Receiver" or count == 2

        def listener_oriented(name, fixed, variances, count):
            # Listener must carry both Up and View (fixed or varying).
            present = fixed + variances
            return name != "Listener" or ("Up" in present and "View" in present)

        def emitter_view_and_up(name, fixed, variances, count):
            # Emitter may omit orientation entirely, but if View is given
            # then Up must be given as well.
            present = fixed + variances
            if name != "Emitter" or "View" not in present:
                return True
            return "Up" in present and "View" in present

        self.conditions["must have 2 Receivers"] = two_receivers
        self.conditions["must have Listener Up and View"] = listener_oriented
        self.conditions["must have both Emitter View and Up or neither"] = emitter_view_and_up

    def add_metadata(self, database):
        """Set the FIRE data type and reverberant room type on *database*."""
        super().add_metadata(database)
        database.Data.Type = "FIRE"
        database.Room.Type = "reverberant"
        return
|
normal
|
{
"blob_id": "e30bd33ae18881307e7cf4f60d3c60eae91573bc",
"index": 181,
"step-1": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n <mask token>\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-2": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n <mask token>\n <mask token>\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-3": "<mask token>\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-4": "from .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = 'MultiSpeakerBRIR'\n version = '0.3'\n\n def __init__(self):\n super().__init__()\n self.default_objects['Receiver']['count'] = 2\n self.conditions['must have 2 Receivers'] = (lambda name, fixed,\n variances, count: name != 'Receiver' or count == 2)\n self.conditions['must have Listener Up and View'] = (lambda name,\n fixed, variances, count: name != 'Listener' or 'Up' in fixed +\n variances and 'View' in fixed + variances)\n (self.conditions['must have both Emitter View and Up or neither']) = (\n lambda name, fixed, variances, count: name != 'Emitter' or \n 'View' not in fixed + variances or 'Up' in fixed + variances and\n 'View' in fixed + variances)\n\n def add_metadata(self, database):\n super().add_metadata(database)\n database.Data.Type = 'FIRE'\n database.Room.Type = 'reverberant'\n return\n",
"step-5": "# Copyright (c) 2019 Jannika Lossner\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom .SimpleFreeFieldHRIR import SimpleFreeFieldHRIR\n\nclass MultiSpeakerBRIR(SimpleFreeFieldHRIR):\n name = \"MultiSpeakerBRIR\"\n version = \"0.3\"\n def __init__(self):\n super().__init__()\n self.default_objects[\"Receiver\"][\"count\"] = 2\n\n #self.default_data[\"IR\"] = 1\n\n self.conditions[\"must have 2 Receivers\"] = lambda name, fixed, variances, count: name != \"Receiver\" or count == 2\n self.conditions[\"must have Listener Up and View\"] = lambda name, fixed, variances, count: name != \"Listener\" or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n self.conditions[\"must have both Emitter View and Up or neither\"] = lambda name, fixed, variances, count: name != \"Emitter\" or \"View\" not in fixed + variances or (\"Up\" in fixed + variances and \"View\" in fixed + variances)\n\n def add_metadata(self, database):\n 
super().add_metadata(database)\n\n database.Data.Type = \"FIRE\"\n database.Room.Type = \"reverberant\"\n return\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dot.edge('BaseException', 'SystemExit')
dot.edge('BaseException', 'KeyboardInterrupt')
dot.edge('BaseException', 'GeneratorExit')
dot.edge('BaseException', 'Exception')
dot.edge('Exception', 'StopIteration')
dot.edge('Exception', 'StopAsyncIteration')
dot.edge('Exception', 'ArithmeticError')
dot.edge('ArithmeticError', 'FloatingPointError')
dot.edge('ArithmeticError', 'OverflowError')
dot.edge('ArithmeticError', 'ZeroDivisionError')
dot.edge('Exception', 'AssertionError')
dot.edge('Exception', 'AttributeError')
dot.edge('Exception', 'BufferError')
dot.edge('Exception', 'EOFError')
dot.edge('Exception', 'ImportError')
dot.edge('ImportError', 'ModuleNotFoundError')
dot.edge('Exception', 'LookupError')
dot.edge('LookupError', 'IndexError')
dot.edge('LookupError', 'KeyError')
dot.edge('Exception', 'MemoryError')
dot.edge('Exception', 'NameError')
dot.edge('NameError', 'UnboundLocalError')
dot.edge('Exception', 'OSError')
dot.edge('OSError', 'BlockingIOError')
dot.edge('OSError', 'ChildProcessError')
dot.edge('OSError', 'ConnectionError')
dot.edge('ConnectionError', 'BrokenPipeError')
dot.edge('ConnectionError', 'ConnectionAbortedError')
dot.edge('ConnectionError', 'ConnectionRefusedError')
dot.edge('ConnectionError', 'ConnectionResetError')
dot.edge('OSError', 'FileExistsError')
dot.edge('OSError', 'FileNotFoundError')
dot.edge('OSError', 'InterruptedError')
dot.edge('OSError', 'IsADirectoryError')
dot.edge('OSError', 'NotADirectoryError')
dot.edge('OSError', 'PermissionError')
dot.edge('OSError', 'ProcessLookupError')
dot.edge('OSError', 'TimeoutError')
dot.edge('Exception', 'ReferenceError')
dot.edge('Exception', 'RuntimeError')
dot.edge('RuntimeError', 'NotImplementedError')
dot.edge('RuntimeError', 'RecursionError')
dot.edge('Exception', 'SyntaxError')
dot.edge('SyntaxError', 'IndentationError')
dot.edge('SyntaxError', 'TabError')
dot.edge('Exception', 'SystemError')
dot.edge('Exception', 'TypeError')
dot.edge('Exception', 'ValueError')
dot.edge('ValueError', 'UnicodeError')
dot.edge('UnicodeError', 'UnicodeDecodeError')
dot.edge('UnicodeError', 'UnicodeEncodeError')
dot.edge('UnicodeError', 'UnicodeTranslateError')
<|reserved_special_token_0|>
with open('exceptions.dot', 'w') as dot_file:
dot_file.write(dot_source)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dot = Digraph()

# (parent, child) pairs of the builtin exception hierarchy, in insertion order.
_EDGES = [
    ('BaseException', 'SystemExit'),
    ('BaseException', 'KeyboardInterrupt'),
    ('BaseException', 'GeneratorExit'),
    ('BaseException', 'Exception'),
    ('Exception', 'StopIteration'),
    ('Exception', 'StopAsyncIteration'),
    ('Exception', 'ArithmeticError'),
    ('ArithmeticError', 'FloatingPointError'),
    ('ArithmeticError', 'OverflowError'),
    ('ArithmeticError', 'ZeroDivisionError'),
    ('Exception', 'AssertionError'),
    ('Exception', 'AttributeError'),
    ('Exception', 'BufferError'),
    ('Exception', 'EOFError'),
    ('Exception', 'ImportError'),
    ('ImportError', 'ModuleNotFoundError'),
    ('Exception', 'LookupError'),
    ('LookupError', 'IndexError'),
    ('LookupError', 'KeyError'),
    ('Exception', 'MemoryError'),
    ('Exception', 'NameError'),
    ('NameError', 'UnboundLocalError'),
    ('Exception', 'OSError'),
    ('OSError', 'BlockingIOError'),
    ('OSError', 'ChildProcessError'),
    ('OSError', 'ConnectionError'),
    ('ConnectionError', 'BrokenPipeError'),
    ('ConnectionError', 'ConnectionAbortedError'),
    ('ConnectionError', 'ConnectionRefusedError'),
    ('ConnectionError', 'ConnectionResetError'),
    ('OSError', 'FileExistsError'),
    ('OSError', 'FileNotFoundError'),
    ('OSError', 'InterruptedError'),
    ('OSError', 'IsADirectoryError'),
    ('OSError', 'NotADirectoryError'),
    ('OSError', 'PermissionError'),
    ('OSError', 'ProcessLookupError'),
    ('OSError', 'TimeoutError'),
    ('Exception', 'ReferenceError'),
    ('Exception', 'RuntimeError'),
    ('RuntimeError', 'NotImplementedError'),
    ('RuntimeError', 'RecursionError'),
    ('Exception', 'SyntaxError'),
    ('SyntaxError', 'IndentationError'),
    ('SyntaxError', 'TabError'),
    ('Exception', 'SystemError'),
    ('Exception', 'TypeError'),
    ('Exception', 'ValueError'),
    ('ValueError', 'UnicodeError'),
    ('UnicodeError', 'UnicodeDecodeError'),
    ('UnicodeError', 'UnicodeEncodeError'),
    ('UnicodeError', 'UnicodeTranslateError'),
]
for _parent, _child in _EDGES:
    dot.edge(_parent, _child)

dot_source = dot.source
with open('exceptions.dot', 'w') as dot_file:
    dot_file.write(dot_source)
<|reserved_special_token_1|>
from graphviz import Digraph

# Build a Graphviz diagram of Python's builtin exception hierarchy and
# save its DOT source to "exceptions.dot".

# (parent, child) pairs, in insertion order.
_EDGES = [
    ('BaseException', 'SystemExit'),
    ('BaseException', 'KeyboardInterrupt'),
    ('BaseException', 'GeneratorExit'),
    ('BaseException', 'Exception'),
    ('Exception', 'StopIteration'),
    ('Exception', 'StopAsyncIteration'),
    ('Exception', 'ArithmeticError'),
    ('ArithmeticError', 'FloatingPointError'),
    ('ArithmeticError', 'OverflowError'),
    ('ArithmeticError', 'ZeroDivisionError'),
    ('Exception', 'AssertionError'),
    ('Exception', 'AttributeError'),
    ('Exception', 'BufferError'),
    ('Exception', 'EOFError'),
    ('Exception', 'ImportError'),
    ('ImportError', 'ModuleNotFoundError'),
    ('Exception', 'LookupError'),
    ('LookupError', 'IndexError'),
    ('LookupError', 'KeyError'),
    ('Exception', 'MemoryError'),
    ('Exception', 'NameError'),
    ('NameError', 'UnboundLocalError'),
    ('Exception', 'OSError'),
    ('OSError', 'BlockingIOError'),
    ('OSError', 'ChildProcessError'),
    ('OSError', 'ConnectionError'),
    ('ConnectionError', 'BrokenPipeError'),
    ('ConnectionError', 'ConnectionAbortedError'),
    ('ConnectionError', 'ConnectionRefusedError'),
    ('ConnectionError', 'ConnectionResetError'),
    ('OSError', 'FileExistsError'),
    ('OSError', 'FileNotFoundError'),
    ('OSError', 'InterruptedError'),
    ('OSError', 'IsADirectoryError'),
    ('OSError', 'NotADirectoryError'),
    ('OSError', 'PermissionError'),
    ('OSError', 'ProcessLookupError'),
    ('OSError', 'TimeoutError'),
    ('Exception', 'ReferenceError'),
    ('Exception', 'RuntimeError'),
    ('RuntimeError', 'NotImplementedError'),
    ('RuntimeError', 'RecursionError'),
    ('Exception', 'SyntaxError'),
    ('SyntaxError', 'IndentationError'),
    ('SyntaxError', 'TabError'),
    ('Exception', 'SystemError'),
    ('Exception', 'TypeError'),
    ('Exception', 'ValueError'),
    ('ValueError', 'UnicodeError'),
    ('UnicodeError', 'UnicodeDecodeError'),
    ('UnicodeError', 'UnicodeEncodeError'),
    ('UnicodeError', 'UnicodeTranslateError'),
]

dot = Digraph()
for _parent, _child in _EDGES:
    dot.edge(_parent, _child)

dot_source = dot.source
with open('exceptions.dot', 'w') as dot_file:
    dot_file.write(dot_source)
<|reserved_special_token_1|>
from graphviz import Digraph

# (parent, child) pairs describing the builtin Python exception
# hierarchy, listed in the order the edges should be emitted.
_EXCEPTION_TREE = [
    ("BaseException", "SystemExit"),
    ("BaseException", "KeyboardInterrupt"),
    ("BaseException", "GeneratorExit"),
    ("BaseException", "Exception"),
    ("Exception", "StopIteration"),
    ("Exception", "StopAsyncIteration"),
    ("Exception", "ArithmeticError"),
    ("ArithmeticError", "FloatingPointError"),
    ("ArithmeticError", "OverflowError"),
    ("ArithmeticError", "ZeroDivisionError"),
    ("Exception", "AssertionError"),
    ("Exception", "AttributeError"),
    ("Exception", "BufferError"),
    ("Exception", "EOFError"),
    ("Exception", "ImportError"),
    ("ImportError", "ModuleNotFoundError"),
    ("Exception", "LookupError"),
    ("LookupError", "IndexError"),
    ("LookupError", "KeyError"),
    ("Exception", "MemoryError"),
    ("Exception", "NameError"),
    ("NameError", "UnboundLocalError"),
    ("Exception", "OSError"),
    ("OSError", "BlockingIOError"),
    ("OSError", "ChildProcessError"),
    ("OSError", "ConnectionError"),
    ("ConnectionError", "BrokenPipeError"),
    ("ConnectionError", "ConnectionAbortedError"),
    ("ConnectionError", "ConnectionRefusedError"),
    ("ConnectionError", "ConnectionResetError"),
    ("OSError", "FileExistsError"),
    ("OSError", "FileNotFoundError"),
    ("OSError", "InterruptedError"),
    ("OSError", "IsADirectoryError"),
    ("OSError", "NotADirectoryError"),
    ("OSError", "PermissionError"),
    ("OSError", "ProcessLookupError"),
    ("OSError", "TimeoutError"),
    ("Exception", "ReferenceError"),
    ("Exception", "RuntimeError"),
    ("RuntimeError", "NotImplementedError"),
    ("RuntimeError", "RecursionError"),
    ("Exception", "SyntaxError"),
    ("SyntaxError", "IndentationError"),
    ("SyntaxError", "TabError"),
    ("Exception", "SystemError"),
    ("Exception", "TypeError"),
    ("Exception", "ValueError"),
    ("ValueError", "UnicodeError"),
    ("UnicodeError", "UnicodeDecodeError"),
    ("UnicodeError", "UnicodeEncodeError"),
    ("UnicodeError", "UnicodeTranslateError"),
]

# Build the directed graph; adding edges in list order keeps the emitted
# DOT source identical to the original hand-written sequence of calls.
dot = Digraph()
for parent, child in _EXCEPTION_TREE:
    dot.edge(parent, child)

# Persist the DOT description so it can be rendered externally
# (e.g. with `dot -Tpng exceptions.dot`).
dot_source = dot.source
with open("exceptions.dot", "w") as dot_file:
    dot_file.write(dot_source)
|
flexible
|
{
"blob_id": "a7db627c49b53cd3a073d866a0373336a46b4053",
"index": 1088,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 'SystemError')\ndot.edge('Exception', 
'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\n<mask token>\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-3": "<mask token>\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-4": "from graphviz import Digraph\ndot = Digraph()\ndot.edge('BaseException', 'SystemExit')\ndot.edge('BaseException', 'KeyboardInterrupt')\ndot.edge('BaseException', 'GeneratorExit')\ndot.edge('BaseException', 'Exception')\ndot.edge('Exception', 'StopIteration')\ndot.edge('Exception', 'StopAsyncIteration')\ndot.edge('Exception', 'ArithmeticError')\ndot.edge('ArithmeticError', 'FloatingPointError')\ndot.edge('ArithmeticError', 'OverflowError')\ndot.edge('ArithmeticError', 'ZeroDivisionError')\ndot.edge('Exception', 'AssertionError')\ndot.edge('Exception', 'AttributeError')\ndot.edge('Exception', 'BufferError')\ndot.edge('Exception', 'EOFError')\ndot.edge('Exception', 'ImportError')\ndot.edge('ImportError', 'ModuleNotFoundError')\ndot.edge('Exception', 'LookupError')\ndot.edge('LookupError', 'IndexError')\ndot.edge('LookupError', 'KeyError')\ndot.edge('Exception', 'MemoryError')\ndot.edge('Exception', 'NameError')\ndot.edge('NameError', 'UnboundLocalError')\ndot.edge('Exception', 'OSError')\ndot.edge('OSError', 'BlockingIOError')\ndot.edge('OSError', 'ChildProcessError')\ndot.edge('OSError', 'ConnectionError')\ndot.edge('ConnectionError', 'BrokenPipeError')\ndot.edge('ConnectionError', 'ConnectionAbortedError')\ndot.edge('ConnectionError', 'ConnectionRefusedError')\ndot.edge('ConnectionError', 'ConnectionResetError')\ndot.edge('OSError', 'FileExistsError')\ndot.edge('OSError', 'FileNotFoundError')\ndot.edge('OSError', 'InterruptedError')\ndot.edge('OSError', 'IsADirectoryError')\ndot.edge('OSError', 'NotADirectoryError')\ndot.edge('OSError', 'PermissionError')\ndot.edge('OSError', 'ProcessLookupError')\ndot.edge('OSError', 'TimeoutError')\ndot.edge('Exception', 'ReferenceError')\ndot.edge('Exception', 'RuntimeError')\ndot.edge('RuntimeError', 'NotImplementedError')\ndot.edge('RuntimeError', 'RecursionError')\ndot.edge('Exception', 'SyntaxError')\ndot.edge('SyntaxError', 'IndentationError')\ndot.edge('SyntaxError', 'TabError')\ndot.edge('Exception', 
'SystemError')\ndot.edge('Exception', 'TypeError')\ndot.edge('Exception', 'ValueError')\ndot.edge('ValueError', 'UnicodeError')\ndot.edge('UnicodeError', 'UnicodeDecodeError')\ndot.edge('UnicodeError', 'UnicodeEncodeError')\ndot.edge('UnicodeError', 'UnicodeTranslateError')\ndot_source = dot.source\nwith open('exceptions.dot', 'w') as dot_file:\n dot_file.write(dot_source)\n",
"step-5": "from graphviz import Digraph\n\ndot = Digraph()\n\ndot.edge(\"BaseException\", \"SystemExit\")\ndot.edge(\"BaseException\", \"KeyboardInterrupt\")\ndot.edge(\"BaseException\", \"GeneratorExit\")\ndot.edge(\"BaseException\", \"Exception\")\ndot.edge(\"Exception\", \"StopIteration\")\ndot.edge(\"Exception\", \"StopAsyncIteration\")\ndot.edge(\"Exception\", \"ArithmeticError\")\ndot.edge(\"ArithmeticError\", \"FloatingPointError\")\ndot.edge(\"ArithmeticError\", \"OverflowError\")\ndot.edge(\"ArithmeticError\", \"ZeroDivisionError\")\ndot.edge(\"Exception\", \"AssertionError\")\ndot.edge(\"Exception\", \"AttributeError\")\ndot.edge(\"Exception\", \"BufferError\")\ndot.edge(\"Exception\", \"EOFError\")\ndot.edge(\"Exception\", \"ImportError\")\ndot.edge(\"ImportError\", \"ModuleNotFoundError\")\ndot.edge(\"Exception\", \"LookupError\")\ndot.edge(\"LookupError\", \"IndexError\")\ndot.edge(\"LookupError\", \"KeyError\")\ndot.edge(\"Exception\", \"MemoryError\")\ndot.edge(\"Exception\", \"NameError\")\ndot.edge(\"NameError\", \"UnboundLocalError\")\ndot.edge(\"Exception\", \"OSError\")\ndot.edge(\"OSError\", \"BlockingIOError\")\ndot.edge(\"OSError\", \"ChildProcessError\")\ndot.edge(\"OSError\", \"ConnectionError\")\ndot.edge(\"ConnectionError\", \"BrokenPipeError\")\ndot.edge(\"ConnectionError\", \"ConnectionAbortedError\")\ndot.edge(\"ConnectionError\", \"ConnectionRefusedError\")\ndot.edge(\"ConnectionError\", \"ConnectionResetError\")\ndot.edge(\"OSError\", \"FileExistsError\")\ndot.edge(\"OSError\", \"FileNotFoundError\")\ndot.edge(\"OSError\", \"InterruptedError\")\ndot.edge(\"OSError\", \"IsADirectoryError\")\ndot.edge(\"OSError\", \"NotADirectoryError\")\ndot.edge(\"OSError\", \"PermissionError\")\ndot.edge(\"OSError\", \"ProcessLookupError\")\ndot.edge(\"OSError\", \"TimeoutError\")\ndot.edge(\"Exception\", \"ReferenceError\")\ndot.edge(\"Exception\", \"RuntimeError\")\ndot.edge(\"RuntimeError\", \"NotImplementedError\")\ndot.edge(\"RuntimeError\", 
\"RecursionError\")\ndot.edge(\"Exception\", \"SyntaxError\")\ndot.edge(\"SyntaxError\", \"IndentationError\")\ndot.edge(\"SyntaxError\", \"TabError\")\ndot.edge(\"Exception\", \"SystemError\")\ndot.edge(\"Exception\", \"TypeError\")\ndot.edge(\"Exception\", \"ValueError\")\ndot.edge(\"ValueError\", \"UnicodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeDecodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeEncodeError\")\ndot.edge(\"UnicodeError\", \"UnicodeTranslateError\")\n\ndot_source = dot.source\n\nwith open(\"exceptions.dot\", \"w\") as dot_file:\n dot_file.write(dot_source)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for seed in range(start_position, start_position + max_seeds):
cur_wins = 0
max_wins = 0
cur_losses = 0
max_losses = 0
win_streak = []
loss_streak = []
np.random.seed(seed)
start_time = timer()
start_bal = cur_bal = 0.001
for index in range(max_rolls):
bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(
'{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(
cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),
float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(
cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]
if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:
print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'
.format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))
print('Max_L: {}'.format(max_losses))
print('Max_W: {}'.format(max_wins))
seed_wins += 1
num_rolls.append(index)
break
if cur_losses < len(bets):
bet = bets[cur_losses]
else:
bet = bets[0]
if bet < bets[0]:
bet = bets[0]
if cur_bal <= 0:
break
if bet >= cur_bal:
break
roll = np.random.randint(1, 10000)
win = True if roll < 3900 else False
if win:
loss_streak.append(cur_losses)
cur_bal += bet * 2
cur_losses = 0
cur_wins += 1
else:
win_streak.append(cur_wins)
cur_bal -= bet
cur_losses += 1
cur_wins = 0
if cur_losses > max_losses:
max_losses = cur_losses
if cur_wins > max_wins:
max_wins = cur_wins
seed_time = timer() - start_time
print('Seed_time: {:.2f}'.format(seed_time), end='\r')
print('Won {}/{} Seeds'.format(seed_wins, max_seeds))
if seed_wins:
print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]
bets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]
max_seeds = 100
max_rolls = 100000
seed_wins = 0
num_rolls = []
start_position = np.random.randint(1, 100000000)
for seed in range(start_position, start_position + max_seeds):
cur_wins = 0
max_wins = 0
cur_losses = 0
max_losses = 0
win_streak = []
loss_streak = []
np.random.seed(seed)
start_time = timer()
start_bal = cur_bal = 0.001
for index in range(max_rolls):
bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(
'{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(
cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),
float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(
cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]
if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:
print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'
.format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))
print('Max_L: {}'.format(max_losses))
print('Max_W: {}'.format(max_wins))
seed_wins += 1
num_rolls.append(index)
break
if cur_losses < len(bets):
bet = bets[cur_losses]
else:
bet = bets[0]
if bet < bets[0]:
bet = bets[0]
if cur_bal <= 0:
break
if bet >= cur_bal:
break
roll = np.random.randint(1, 10000)
win = True if roll < 3900 else False
if win:
loss_streak.append(cur_losses)
cur_bal += bet * 2
cur_losses = 0
cur_wins += 1
else:
win_streak.append(cur_wins)
cur_bal -= bet
cur_losses += 1
cur_wins = 0
if cur_losses > max_losses:
max_losses = cur_losses
if cur_wins > max_wins:
max_wins = cur_wins
seed_time = timer() - start_time
print('Seed_time: {:.2f}'.format(seed_time), end='\r')
print('Won {}/{} Seeds'.format(seed_wins, max_seeds))
if seed_wins:
print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))
)
<|reserved_special_token_1|>
from timeit import default_timer as timer
import numpy as np
# Monte-Carlo simulation of a martingale-style dice betting strategy:
# for a run of consecutive RNG seeds, keep betting until the balance
# either reaches the profit target or busts, then report how often (and
# in how many rolls) the target was hit.
#
# Fixed bet ladders kept for reference only; the loop below derives its
# ladder from the current balance instead, so these two lists are unused.
bets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]
bets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]
max_seeds = 100  # number of consecutive seeds to simulate
max_rolls = 100000  # per-seed roll budget
seed_wins = 0  # seeds that reached the profit target
num_rolls = []  # roll count for each seed counted as a win
start_position = np.random.randint(1, 100000000)  # random first seed
for seed in range(start_position, start_position + max_seeds):
    # Per-seed streak statistics.
    cur_wins = 0
    max_wins = 0
    cur_losses = 0
    max_losses = 0
    win_streak = []
    loss_streak = []
    np.random.seed(seed)
    start_time = timer()
    start_bal = cur_bal = 0.001  # starting bankroll (8-decimal, BTC-style units)
    for index in range(max_rolls):
        # Bet ladder as fractions of the current balance, indexed by the
        # current loss streak; each entry is truncated to 8 decimals.
        bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(
            '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(
            cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),
            float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(
            cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]
        # Stop on > +10000% profit, or when the roll budget runs out.
        # NOTE(review): hitting the roll cap is also counted in seed_wins,
        # and the summary message below says "1000%" while this threshold
        # is 10000% — confirm which is intended.
        if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:
            print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'
                .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))
            print('Max_L: {}'.format(max_losses))
            print('Max_W: {}'.format(max_wins))
            seed_wins += 1
            num_rolls.append(index)
            break
        # Pick the bet for the current loss streak; past the ladder's end
        # (or below the minimum stake) fall back to the minimum bet.
        if cur_losses < len(bets):
            bet = bets[cur_losses]
        else:
            bet = bets[0]
        if bet < bets[0]:
            bet = bets[0]
        # Bust: balance exhausted, or the next bet cannot be covered.
        if cur_bal <= 0:
            break
        if bet >= cur_bal:
            break
        # One dice roll: randint(1, 10000) yields 1..9999, so rolls
        # 1..3899 win (~39% win probability).
        roll = np.random.randint(1, 10000)
        win = True if roll < 3900 else False
        if win:
            loss_streak.append(cur_losses)
            cur_bal += bet * 2  # win pays 2x the stake (stake was never deducted)
            cur_losses = 0
            cur_wins += 1
        else:
            win_streak.append(cur_wins)
            cur_bal -= bet  # loss forfeits the stake
            cur_losses += 1
            cur_wins = 0
        # Track the longest streaks seen for this seed.
        if cur_losses > max_losses:
            max_losses = cur_losses
        if cur_wins > max_wins:
            max_wins = cur_wins
    seed_time = timer() - start_time
    print('Seed_time: {:.2f}'.format(seed_time), end='\r')
# Summary over all simulated seeds.
print('Won {}/{} Seeds'.format(seed_wins, max_seeds))
if seed_wins:
    # Mean roll count over the seeds counted as wins.
    print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))
        )
<|reserved_special_token_1|>
from timeit import default_timer as timer
import numpy as np
# Monte-Carlo simulation of a martingale-style dice betting strategy
# across a run of consecutive RNG seeds.
bets1 = [ # lowest bet ladder possible (reference only; unused below)
    0.00000001,
    0.00000004,
    0.0000001,
    0.0000005,
    0.00000150,
    0.00000500,
    0.00001000
]
bets2 = [ # ladder 2 is 10x ladder 1 (reference only; unused below)
    0.0000001,
    0.0000004,
    0.000001,
    0.000005,
    0.0000150,
    0.0000500,
    0.0001000
]
# options
max_seeds = 100
max_rolls = 100000 # 100k rolls is around 8-24 hours of fastplay
seed_wins = 0
num_rolls = []
start_position = np.random.randint(1, 100000000) # random first seed
for seed in range(start_position, start_position+max_seeds):
    # current game round stats
    cur_wins = 0
    max_wins = 0
    cur_losses = 0
    max_losses = 0
    win_streak = []
    loss_streak = []
    # seed the RNG and start the per-seed timer
    np.random.seed(seed)
    start_time = timer()
    start_bal = cur_bal = 0.001 # roughly $10 — a reasonable starting bankroll
    # actual play
    for index in range(max_rolls):
        # bet ladder derived from the current balance, indexed by loss streak
        bets = [ # each entry is a fraction of cur_bal, truncated to 8 decimals
            0.00000001,
            float('{:.8f}'.format(cur_bal * 0.001)),
            float('{:.8f}'.format(cur_bal * 0.002)),
            float('{:.8f}'.format(cur_bal * 0.005)),
            float('{:.8f}'.format(cur_bal * 0.01)),
            float('{:.8f}'.format(cur_bal * 0.05)),
            float('{:.8f}'.format(cur_bal * 0.12)),
            float('{:.8f}'.format(cur_bal * 0.3)),
        ]

        # if winning (> +10000% profit) or out of rolls... stop
        # NOTE(review): hitting the roll cap is also counted in seed_wins
        if (cur_bal / start_bal - 1)*100 > 10000 or index==max_rolls-1:
            print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
                seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            print('Max_L: {}'.format(max_losses))
            print('Max_W: {}'.format(max_wins))
            #print('Won The Day!')
            seed_wins += 1
            num_rolls.append(index)
            break

        # get the bet for the current loss streak (minimum bet past the ladder)
        if cur_losses < len(bets):
            bet = bets[cur_losses]
        else:
            bet = bets[0]
        if bet < bets[0]: # never bet below the 8-decimal minimum stake
            bet = bets[0]

        # if losing (busted, or next bet can't be covered) ... stop
        if cur_bal <= 0:
            break
        if bet >= cur_bal:
            #print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
            #    seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            #print('Game Over man!')
            break

        ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MAKE PLAY
        roll = np.random.randint(1, 10000)
        win = True if roll < 3900 else False ## randint yields 1..9999, so rolls 1..3899 win (~39%)
        ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

        # update the balance: a win pays 2x the stake (stake was never
        # deducted up front); a loss forfeits the stake
        if win:
            loss_streak.append(cur_losses)
            cur_bal += bet * 2
            cur_losses = 0
            cur_wins += 1
        else:
            win_streak.append(cur_wins)
            cur_bal -= bet
            cur_losses += 1
            cur_wins = 0

        # track the longest streaks seen this seed
        if cur_losses > max_losses:
            max_losses = cur_losses
        if cur_wins > max_wins:
            max_wins = cur_wins
    # /actual play
    # per-seed timing
    seed_time = timer() - start_time
    print('Seed_time: {:.2f}'.format(seed_time), end='\r') # you will see this a lot if losing
# finished all seeds — summary
# NOTE(review): the message says 1000% but the threshold above is 10000%
print('Won {}/{} Seeds'.format(seed_wins,max_seeds))
if seed_wins: # if any seed won
    print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean())))
|
flexible
|
{
"blob_id": "4c66ab6110e81bb88fc6916a1695e0f23e6e0e9d",
"index": 6754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-3": "<mask token>\nbets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]\nbets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]\nmax_seeds = 100\nmax_rolls = 100000\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-4": "from timeit import default_timer as timer\nimport numpy as np\nbets1 = [1e-08, 4e-08, 1e-07, 5e-07, 1.5e-06, 5e-06, 1e-05]\nbets2 = [1e-07, 4e-07, 1e-06, 5e-06, 1.5e-05, 5e-05, 0.0001]\nmax_seeds = 100\nmax_rolls = 100000\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\nfor seed in range(start_position, start_position + max_seeds):\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n win_streak = []\n loss_streak = []\n np.random.seed(seed)\n start_time = timer()\n start_bal = cur_bal = 0.001\n for index in range(max_rolls):\n bets = [1e-08, float('{:.8f}'.format(cur_bal * 0.001)), float(\n '{:.8f}'.format(cur_bal * 0.002)), float('{:.8f}'.format(\n cur_bal * 0.005)), float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)), float('{:.8f}'.format(\n cur_bal * 0.12)), float('{:.8f}'.format(cur_bal * 0.3))]\n if (cur_bal / start_bal - 1) * 100 > 10000 or index == max_rolls - 1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'\n .format(seed, index, cur_bal, (cur_bal / start_bal - 1) * 100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n seed_wins += 1\n num_rolls.append(index)\n break\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]:\n bet = bets[0]\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n break\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r')\nprint('Won {}/{} Seeds'.format(seed_wins, max_seeds))\nif seed_wins:\n print('Avg # of rolls to 1000%: 
{}'.format(int(np.array(num_rolls).mean()))\n )\n",
"step-5": "from timeit import default_timer as timer\nimport numpy as np\n\nbets1 = [ # lowest config possible\n 0.00000001,\n 0.00000004,\n 0.0000001,\n 0.0000005,\n 0.00000150,\n 0.00000500,\n 0.00001000\n]\nbets2 = [ # 2 is 10x 1\n 0.0000001,\n 0.0000004,\n 0.000001,\n 0.000005,\n 0.0000150,\n 0.0000500,\n 0.0001000\n]\n\n# options\nmax_seeds = 100\nmax_rolls = 100000 # 100k is around 8-24 hours of fastplay\n\nseed_wins = 0\nnum_rolls = []\nstart_position = np.random.randint(1, 100000000)\n\n\n\nfor seed in range(start_position, start_position+max_seeds):\n # current game round stats\n cur_wins = 0\n max_wins = 0\n cur_losses = 0\n max_losses = 0\n\n win_streak = []\n loss_streak = []\n # seed data and timer\n np.random.seed(seed)\n start_time = timer()\n\n start_bal = cur_bal = 0.001 # 10$ reasonable start\n # actual Play\n for index in range(max_rolls):\n # make bets\n bets = [ # this appears to be working, a function of cur_bal\n 0.00000001,\n float('{:.8f}'.format(cur_bal * 0.001)),\n float('{:.8f}'.format(cur_bal * 0.002)),\n float('{:.8f}'.format(cur_bal * 0.005)),\n float('{:.8f}'.format(cur_bal * 0.01)),\n float('{:.8f}'.format(cur_bal * 0.05)),\n float('{:.8f}'.format(cur_bal * 0.12)),\n float('{:.8f}'.format(cur_bal * 0.3)),\n ]\n\n # if Winning... Stop\n if (cur_bal / start_bal - 1)*100 > 10000 or index==max_rolls-1:\n print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(\n seed, index, cur_bal, (cur_bal/start_bal-1)*100))\n print('Max_L: {}'.format(max_losses))\n print('Max_W: {}'.format(max_wins))\n #print('Won The Day!')\n seed_wins += 1\n num_rolls.append(index)\n break\n\n # get bet\n if cur_losses < len(bets):\n bet = bets[cur_losses]\n else:\n bet = bets[0]\n if bet < bets[0]: # dont bet less than 8 decimal places\n bet = bets[0]\n\n # if Losing ... 
Stop\n if cur_bal <= 0:\n break\n if bet >= cur_bal:\n #print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(\n # seed, index, cur_bal, (cur_bal/start_bal-1)*100))\n #print('Game Over man!')\n break\n\n ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MAKE PLAY\n roll = np.random.randint(1, 10000)\n win = True if roll < 3900 else False ## 3900/10000 appears to be a good handicap\n ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n # fix balance\n if win:\n loss_streak.append(cur_losses)\n cur_bal += bet * 2\n cur_losses = 0\n cur_wins += 1\n else:\n win_streak.append(cur_wins)\n cur_bal -= bet\n cur_losses += 1\n cur_wins = 0\n\n # fix maxes\n if cur_losses > max_losses:\n max_losses = cur_losses\n if cur_wins > max_wins:\n max_wins = cur_wins\n # /actual play\n # seed stuff\n seed_time = timer() - start_time\n print('Seed_time: {:.2f}'.format(seed_time), end='\\r') # you will see this a lot if losing\n# Finished All Seeds\nprint('Won {}/{} Seeds'.format(seed_wins,max_seeds))\nif seed_wins: # if won anything.\n print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean())))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from django.utils.translation import gettext_lazy as _
import django_filters
from elasticsearch_dsl.query import Q
class BaseSearchFilterSet(django_filters.FilterSet):
    """Base FilterSet for Elasticsearch-backed search views.

    Adds a free-text ``q`` filter and applies facet-configured filters as
    Elasticsearch ``post_filter``s (via :meth:`apply_filter`) so that facet
    aggregations are not narrowed by their own filter.
    """

    # Document fields targeted by the free-text ``q`` query.
    query_fields = ["content"]

    q = django_filters.CharFilter(
        method="auto_query",
        widget=forms.TextInput(
            attrs={"placeholder": _("Enter search term"), "class": "form-control"}
        ),
    )

    def __init__(self, *args, **kwargs):
        # Pop the custom kwargs before FilterSet.__init__ sees them.
        self.facet_config = kwargs.pop("facet_config", {})
        self.view = kwargs.pop("view", None)
        super().__init__(*args, **kwargs)

    def apply_filter(self, qs, name, *args, **kwargs):
        """Apply a named filter to the search object.

        Faceted filters are applied as ``post_filter`` so aggregations keep
        counting the unfiltered result set; everything else is a regular
        ``filter``.
        """
        if name in self.facet_config:
            return qs.post_filter(name, *args, **kwargs)
        return qs.filter(*args, **kwargs)

    def filter_queryset(self, queryset):
        """
        Filter the queryset with the underlying form's `cleaned_data`. You must
        call `is_valid()` or `errors` before calling this method.

        This method should be overridden if additional filtering needs to be
        applied to the queryset before it is cached.

        Unlike django_filters' default implementation, this intentionally does
        not assert that each filter returns a Django QuerySet: the "queryset"
        here is an elasticsearch-dsl Search object.
        """
        for name, value in self.form.cleaned_data.items():
            queryset = self.filters[name].filter(queryset, value)
        return queryset

    def auto_query(self, qs, name, value):
        """Filter method for ``q``: full-text search over ``query_fields``.

        Uses ``simple_query_string`` with AND semantics; ``lenient`` ignores
        field-level mapping errors. An empty value leaves ``qs`` untouched.
        """
        if value:
            return qs.set_query(
                Q(
                    "simple_query_string",
                    query=value,
                    fields=self.query_fields,
                    default_operator="and",
                    lenient=True,
                )
            )
        return qs
|
normal
|
{
"blob_id": "f225fbf363f1b170704418ed339f2e57ca790975",
"index": 5317,
"step-1": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n <mask token>\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-2": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-3": "<mask token>\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-4": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = ['content']\n q = django_filters.CharFilter(method='auto_query', widget=forms.\n TextInput(attrs={'placeholder': _('Enter search term'), 'class':\n 'form-control'}))\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop('facet_config', {})\n self.view = kwargs.pop('view', None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(Q('simple_query_string', query=value,\n fields=self.query_fields, default_operator='and', lenient=True)\n )\n return qs\n",
"step-5": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nimport django_filters\nfrom elasticsearch_dsl.query import Q\n\n\nclass BaseSearchFilterSet(django_filters.FilterSet):\n query_fields = [\"content\"]\n\n q = django_filters.CharFilter(\n method=\"auto_query\",\n widget=forms.TextInput(\n attrs={\"placeholder\": _(\"Enter search term\"), \"class\": \"form-control\"}\n ),\n )\n\n def __init__(self, *args, **kwargs):\n self.facet_config = kwargs.pop(\"facet_config\", {})\n self.view = kwargs.pop(\"view\", None)\n super().__init__(*args, **kwargs)\n\n def apply_filter(self, qs, name, *args, **kwargs):\n if name in self.facet_config:\n return qs.post_filter(name, *args, **kwargs)\n return qs.filter(*args, **kwargs)\n\n def filter_queryset(self, queryset):\n \"\"\"\n Filter the queryset with the underlying form's `cleaned_data`. You must\n call `is_valid()` or `errors` before calling this method.\n This method should be overridden if additional filtering needs to be\n applied to the queryset before it is cached.\n \"\"\"\n for name, value in self.form.cleaned_data.items():\n queryset = self.filters[name].filter(queryset, value)\n # assert isinstance(queryset, models.QuerySet), \\\n # \"Expected '%s.%s' to return a QuerySet, but got a %s instead.\" \\\n # % (type(self).__name__, name, type(queryset).__name__)\n return queryset\n\n def auto_query(self, qs, name, value):\n if value:\n return qs.set_query(\n Q(\n \"simple_query_string\",\n query=value,\n fields=self.query_fields,\n default_operator=\"and\",\n lenient=True,\n )\n )\n return qs\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
image = Image.open('../datas/xiaoren.png')
img = np.asarray(image)
print(img.shape)
imageNew = np.zeros((600, 100, 3))
imageNew = imageNew.astype(np.uint8)
misc.imsave('m.png', imageNew)
<|reserved_special_token_1|>
import numpy as np
from PIL import Image
from scipy import misc
if __name__ == '__main__':
image = Image.open('../datas/xiaoren.png')
img = np.asarray(image)
print(img.shape)
imageNew = np.zeros((600, 100, 3))
imageNew = imageNew.astype(np.uint8)
misc.imsave('m.png', imageNew)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/6/26 16:11
# @Author : Micky
# @Site :
# @File : 01_压缩相关知识.py
# @Software: PyCharm
import numpy as np
from PIL import Image
from scipy import misc
if __name__ == '__main__':
# 图像加载
image = Image.open('../datas/xiaoren.png')
# 图像转换为numpy数组
img = np.asarray(image)
print(img.shape)
# 构建一个新的图像
imageNew = np.zeros((600,100,3))
imageNew = imageNew.astype(np.uint8)
misc.imsave('m.png',imageNew)
|
flexible
|
{
"blob_id": "176120d4f40bc02b69d7283b7853b74adf369141",
"index": 4726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n image = Image.open('../datas/xiaoren.png')\n img = np.asarray(image)\n print(img.shape)\n imageNew = np.zeros((600, 100, 3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png', imageNew)\n",
"step-3": "import numpy as np\nfrom PIL import Image\nfrom scipy import misc\nif __name__ == '__main__':\n image = Image.open('../datas/xiaoren.png')\n img = np.asarray(image)\n print(img.shape)\n imageNew = np.zeros((600, 100, 3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png', imageNew)\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/6/26 16:11\n# @Author : Micky\n# @Site : \n# @File : 01_压缩相关知识.py\n# @Software: PyCharm\n\nimport numpy as np\nfrom PIL import Image\nfrom scipy import misc\n\nif __name__ == '__main__':\n # 图像加载\n image = Image.open('../datas/xiaoren.png')\n # 图像转换为numpy数组\n img = np.asarray(image)\n print(img.shape)\n\n # 构建一个新的图像\n imageNew = np.zeros((600,100,3))\n imageNew = imageNew.astype(np.uint8)\n misc.imsave('m.png',imageNew)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
import json
import logging
from grab import Grab
from actions import get_course_gold, get_chat_type, get_indexes, group_chat_id
# logging.basicConfig(
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.DEBUG)
# logger = logging.getLogger(__name__)
results = None
timestamp = datetime.datetime.now()
date = timestamp.date()
date_post = date
caption = None
def check_date():
global results
global date_post
current_datetime = datetime.datetime.now()
current_date = current_datetime.date()
if date_post is not None:
# noinspection PyTypeChecker
if date_post < current_date:
results = None
date_post = None
else:
pass
def get_every_day():
global caption
global date_post
url = "https://pp.userapi.com/"
g = Grab()
g.go("https://vk.com/skorpw",
user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 '
'YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36')
# list = g.doc.body.decode('cp1251')
try:
image = g.doc.select(
'.//*[@id="public_wall"]/*[@id="page_wall_posts"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick')[
0].text()
caption = 'Ежа'
date_time = datetime.datetime.now()
date_post = date_time.date()
json_string = get_indexes(image)
res = json.loads(json_string)
result = res['temp']['y']
url_image = result
#url_image=result[0]
#url_image="http://www.kartinki.me/pic/201506/1920x1200/kartinki.me-21699.jpg"
return url_image
except IndexError:
return None
def uid_from_update(update):
"""
Extract the chat id from update
:param update: `telegram.Update`
:return: chat_id extracted from the update
"""
chat_id = None
try:
chat_id = update.message.from_user.id
except (NameError, AttributeError):
try:
chat_id = update.inline_query.from_user.id
except (NameError, AttributeError):
try:
chat_id = update.chosen_inline_result.from_user.id
except (NameError, AttributeError):
try:
chat_id = update.callback_query.from_user.id
except (NameError, AttributeError):
logging.error("No chat_id available in update.")
return chat_id
def start(bot, update):
chat_id = uid_from_update(update)
bot.sendMessage(chat_id=chat_id, text="Приветули")
def get_gold(bot, update):
chat_type = get_chat_type(update)
response = get_course_gold()
if chat_type == "group":
bot.sendMessage(chat_id=group_chat_id(update), text=response,
reply_to_message_id=update.message.message_id)
else:
bot.sendMessage(chat_id=uid_from_update(update), text=response,
reply_to_message_id=update.message.message_id)
def get_everyday(bot, update):
global results
check_date()
chat_type = get_chat_type(update)
if results is None:
results = get_every_day()
if results is not None:
if chat_type == "group":
bot.sendPhoto(chat_id=group_chat_id(update), photo=results,
reply_to_message_id=update.message.message_id,
caption=caption)
else:
bot.sendPhoto(chat_id=uid_from_update(update), photo=results,
reply_to_message_id=update.message.message_id, caption=caption)
else:
if chat_type == "group":
bot.sendMessage(chat_id=group_chat_id(update), text="Ошибка, повторите позже",
reply_to_message_id=update.message.message_id,
caption=caption)
else:
bot.sendMessage(chat_id=uid_from_update(update), text="Ошибка, повторите позже",
reply_to_message_id=update.message.message_id, caption=caption)
else:
if chat_type == "group":
bot.sendPhoto(chat_id=group_chat_id(update), photo=results,
reply_to_message_id=update.message.message_id,
caption=caption)
else:
bot.sendPhoto(chat_id=uid_from_update(update), photo=results,
reply_to_message_id=update.message.message_id, caption=caption)
|
normal
|
{
"blob_id": "c4720eb5a42267970d3a98517dce7857c0ba8450",
"index": 8938,
"step-1": "<mask token>\n\n\ndef check_date():\n global results\n global date_post\n current_datetime = datetime.datetime.now()\n current_date = current_datetime.date()\n if date_post is not None:\n if date_post < current_date:\n results = None\n date_post = None\n else:\n pass\n\n\ndef get_every_day():\n global caption\n global date_post\n url = 'https://pp.userapi.com/'\n g = Grab()\n g.go('https://vk.com/skorpw', user_agent=\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36'\n )\n try:\n image = g.doc.select(\n './/*[@id=\"public_wall\"]/*[@id=\"page_wall_posts\"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick'\n )[0].text()\n caption = 'Ежа'\n date_time = datetime.datetime.now()\n date_post = date_time.date()\n json_string = get_indexes(image)\n res = json.loads(json_string)\n result = res['temp']['y']\n url_image = result\n return url_image\n except IndexError:\n return None\n\n\ndef uid_from_update(update):\n \"\"\"\n Extract the chat id from update\n :param update: `telegram.Update`\n :return: chat_id extracted from the update\n \"\"\"\n chat_id = None\n try:\n chat_id = update.message.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.inline_query.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.chosen_inline_result.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.callback_query.from_user.id\n except (NameError, AttributeError):\n logging.error('No chat_id available in update.')\n return chat_id\n\n\n<mask token>\n\n\ndef get_everyday(bot, update):\n global results\n check_date()\n chat_type = get_chat_type(update)\n if results is None:\n results = get_every_day()\n if results is not None:\n if chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=\n caption)\n else:\n 
bot.sendPhoto(chat_id=uid_from_update(update), photo=\n results, reply_to_message_id=update.message.message_id,\n caption=caption)\n elif chat_type == 'group':\n bot.sendMessage(chat_id=group_chat_id(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n elif chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n",
"step-2": "<mask token>\n\n\ndef check_date():\n global results\n global date_post\n current_datetime = datetime.datetime.now()\n current_date = current_datetime.date()\n if date_post is not None:\n if date_post < current_date:\n results = None\n date_post = None\n else:\n pass\n\n\ndef get_every_day():\n global caption\n global date_post\n url = 'https://pp.userapi.com/'\n g = Grab()\n g.go('https://vk.com/skorpw', user_agent=\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36'\n )\n try:\n image = g.doc.select(\n './/*[@id=\"public_wall\"]/*[@id=\"page_wall_posts\"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick'\n )[0].text()\n caption = 'Ежа'\n date_time = datetime.datetime.now()\n date_post = date_time.date()\n json_string = get_indexes(image)\n res = json.loads(json_string)\n result = res['temp']['y']\n url_image = result\n return url_image\n except IndexError:\n return None\n\n\ndef uid_from_update(update):\n \"\"\"\n Extract the chat id from update\n :param update: `telegram.Update`\n :return: chat_id extracted from the update\n \"\"\"\n chat_id = None\n try:\n chat_id = update.message.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.inline_query.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.chosen_inline_result.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.callback_query.from_user.id\n except (NameError, AttributeError):\n logging.error('No chat_id available in update.')\n return chat_id\n\n\ndef start(bot, update):\n chat_id = uid_from_update(update)\n bot.sendMessage(chat_id=chat_id, text='Приветули')\n\n\n<mask token>\n\n\ndef get_everyday(bot, update):\n global results\n check_date()\n chat_type = get_chat_type(update)\n if results is None:\n results = get_every_day()\n if results is not None:\n if chat_type == 'group':\n 
bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=\n caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=\n results, reply_to_message_id=update.message.message_id,\n caption=caption)\n elif chat_type == 'group':\n bot.sendMessage(chat_id=group_chat_id(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n elif chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n",
"step-3": "<mask token>\n\n\ndef check_date():\n global results\n global date_post\n current_datetime = datetime.datetime.now()\n current_date = current_datetime.date()\n if date_post is not None:\n if date_post < current_date:\n results = None\n date_post = None\n else:\n pass\n\n\ndef get_every_day():\n global caption\n global date_post\n url = 'https://pp.userapi.com/'\n g = Grab()\n g.go('https://vk.com/skorpw', user_agent=\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36'\n )\n try:\n image = g.doc.select(\n './/*[@id=\"public_wall\"]/*[@id=\"page_wall_posts\"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick'\n )[0].text()\n caption = 'Ежа'\n date_time = datetime.datetime.now()\n date_post = date_time.date()\n json_string = get_indexes(image)\n res = json.loads(json_string)\n result = res['temp']['y']\n url_image = result\n return url_image\n except IndexError:\n return None\n\n\ndef uid_from_update(update):\n \"\"\"\n Extract the chat id from update\n :param update: `telegram.Update`\n :return: chat_id extracted from the update\n \"\"\"\n chat_id = None\n try:\n chat_id = update.message.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.inline_query.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.chosen_inline_result.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.callback_query.from_user.id\n except (NameError, AttributeError):\n logging.error('No chat_id available in update.')\n return chat_id\n\n\ndef start(bot, update):\n chat_id = uid_from_update(update)\n bot.sendMessage(chat_id=chat_id, text='Приветули')\n\n\ndef get_gold(bot, update):\n chat_type = get_chat_type(update)\n response = get_course_gold()\n if chat_type == 'group':\n bot.sendMessage(chat_id=group_chat_id(update), text=response,\n reply_to_message_id=update.message.message_id)\n else:\n 
bot.sendMessage(chat_id=uid_from_update(update), text=response,\n reply_to_message_id=update.message.message_id)\n\n\ndef get_everyday(bot, update):\n global results\n check_date()\n chat_type = get_chat_type(update)\n if results is None:\n results = get_every_day()\n if results is not None:\n if chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=\n caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=\n results, reply_to_message_id=update.message.message_id,\n caption=caption)\n elif chat_type == 'group':\n bot.sendMessage(chat_id=group_chat_id(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n elif chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n",
"step-4": "<mask token>\nresults = None\ntimestamp = datetime.datetime.now()\ndate = timestamp.date()\ndate_post = date\ncaption = None\n\n\ndef check_date():\n global results\n global date_post\n current_datetime = datetime.datetime.now()\n current_date = current_datetime.date()\n if date_post is not None:\n if date_post < current_date:\n results = None\n date_post = None\n else:\n pass\n\n\ndef get_every_day():\n global caption\n global date_post\n url = 'https://pp.userapi.com/'\n g = Grab()\n g.go('https://vk.com/skorpw', user_agent=\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36'\n )\n try:\n image = g.doc.select(\n './/*[@id=\"public_wall\"]/*[@id=\"page_wall_posts\"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick'\n )[0].text()\n caption = 'Ежа'\n date_time = datetime.datetime.now()\n date_post = date_time.date()\n json_string = get_indexes(image)\n res = json.loads(json_string)\n result = res['temp']['y']\n url_image = result\n return url_image\n except IndexError:\n return None\n\n\ndef uid_from_update(update):\n \"\"\"\n Extract the chat id from update\n :param update: `telegram.Update`\n :return: chat_id extracted from the update\n \"\"\"\n chat_id = None\n try:\n chat_id = update.message.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.inline_query.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.chosen_inline_result.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.callback_query.from_user.id\n except (NameError, AttributeError):\n logging.error('No chat_id available in update.')\n return chat_id\n\n\ndef start(bot, update):\n chat_id = uid_from_update(update)\n bot.sendMessage(chat_id=chat_id, text='Приветули')\n\n\ndef get_gold(bot, update):\n chat_type = get_chat_type(update)\n response = get_course_gold()\n if chat_type == 'group':\n 
bot.sendMessage(chat_id=group_chat_id(update), text=response,\n reply_to_message_id=update.message.message_id)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=response,\n reply_to_message_id=update.message.message_id)\n\n\ndef get_everyday(bot, update):\n global results\n check_date()\n chat_type = get_chat_type(update)\n if results is None:\n results = get_every_day()\n if results is not None:\n if chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=\n caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=\n results, reply_to_message_id=update.message.message_id,\n caption=caption)\n elif chat_type == 'group':\n bot.sendMessage(chat_id=group_chat_id(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=\n 'Ошибка, повторите позже', reply_to_message_id=update.\n message.message_id, caption=caption)\n elif chat_type == 'group':\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n",
"step-5": "import datetime\nimport json\nimport logging\n\nfrom grab import Grab\n\nfrom actions import get_course_gold, get_chat_type, get_indexes, group_chat_id\n\n# logging.basicConfig(\n# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n# level=logging.DEBUG)\n# logger = logging.getLogger(__name__)\n\nresults = None\ntimestamp = datetime.datetime.now()\ndate = timestamp.date()\ndate_post = date\ncaption = None\n\n\ndef check_date():\n global results\n global date_post\n current_datetime = datetime.datetime.now()\n current_date = current_datetime.date()\n if date_post is not None:\n # noinspection PyTypeChecker\n if date_post < current_date:\n results = None\n date_post = None\n else:\n pass\n\t\t\n\ndef get_every_day():\n global caption\n global date_post\n url = \"https://pp.userapi.com/\"\n g = Grab()\n g.go(\"https://vk.com/skorpw\",\n user_agent='Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 '\n 'YaBrowser/17.11.1.990 Yowser/2.5 Safari/537.36')\n # list = g.doc.body.decode('cp1251')\n try:\n image = g.doc.select(\n './/*[@id=\"public_wall\"]/*[@id=\"page_wall_posts\"]/div/div/div[2]/div[1]/div[1]/div[1]/div[2]/a[@aria-label]/@onclick')[\n 0].text()\n caption = 'Ежа'\n date_time = datetime.datetime.now()\n date_post = date_time.date()\n json_string = get_indexes(image)\n res = json.loads(json_string)\n result = res['temp']['y']\n url_image = result\n #url_image=result[0]\n #url_image=\"http://www.kartinki.me/pic/201506/1920x1200/kartinki.me-21699.jpg\"\n return url_image\n except IndexError:\n return None\n\n\ndef uid_from_update(update):\n \"\"\"\n Extract the chat id from update\n :param update: `telegram.Update`\n :return: chat_id extracted from the update\n \"\"\"\n chat_id = None\n try:\n chat_id = update.message.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.inline_query.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = 
update.chosen_inline_result.from_user.id\n except (NameError, AttributeError):\n try:\n chat_id = update.callback_query.from_user.id\n except (NameError, AttributeError):\n logging.error(\"No chat_id available in update.\")\n return chat_id\n\n\ndef start(bot, update):\n chat_id = uid_from_update(update)\n bot.sendMessage(chat_id=chat_id, text=\"Приветули\")\n\n\ndef get_gold(bot, update):\n chat_type = get_chat_type(update)\n response = get_course_gold()\n if chat_type == \"group\":\n bot.sendMessage(chat_id=group_chat_id(update), text=response,\n reply_to_message_id=update.message.message_id)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=response,\n reply_to_message_id=update.message.message_id)\n\n\ndef get_everyday(bot, update):\n global results\n check_date()\n chat_type = get_chat_type(update)\n if results is None:\n results = get_every_day()\n if results is not None:\n if chat_type == \"group\":\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id,\n caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n if chat_type == \"group\":\n bot.sendMessage(chat_id=group_chat_id(update), text=\"Ошибка, повторите позже\",\n reply_to_message_id=update.message.message_id,\n caption=caption)\n else:\n bot.sendMessage(chat_id=uid_from_update(update), text=\"Ошибка, повторите позже\",\n reply_to_message_id=update.message.message_id, caption=caption)\n else:\n if chat_type == \"group\":\n bot.sendPhoto(chat_id=group_chat_id(update), photo=results,\n reply_to_message_id=update.message.message_id,\n caption=caption)\n else:\n bot.sendPhoto(chat_id=uid_from_update(update), photo=results,\n reply_to_message_id=update.message.message_id, caption=caption)\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'UTF-8').encode(), addr))
from_addr = 'gaofeng4280@163.com'
to_addr = '1071380275@qq.com'
smtp_server = 'smtp.163.com'
passwd = input('Password: ')
msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
msg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)
msg['To'] = _format_addr('开发者<%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login()
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
|
normal
|
{
"blob_id": "4dd71d01e499f3d0ee49d3bf5204fb3bbb03ede5",
"index": 2976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\n<mask token>\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-3": "<mask token>\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\nfrom_addr = 'gaofeng4280@163.com'\nto_addr = '1071380275@qq.com'\nsmtp_server = 'smtp.163.com'\npasswd = input('Password: ')\nmsg = MIMEText('hello, send by Python...', 'plain', 'utf-8')\nmsg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)\nmsg['To'] = _format_addr('开发者<%s>' % to_addr)\nmsg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-4": "from email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\nimport smtplib\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\nfrom_addr = 'gaofeng4280@163.com'\nto_addr = '1071380275@qq.com'\nsmtp_server = 'smtp.163.com'\npasswd = input('Password: ')\nmsg = MIMEText('hello, send by Python...', 'plain', 'utf-8')\nmsg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)\nmsg['To'] = _format_addr('开发者<%s>' % to_addr)\nmsg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class FixtureBittrex:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class FixtureBittrex:
PING = {'serverTime': 1582535502000}
MARKETS = [{'symbol': 'ETH-BTC', 'baseCurrencySymbol': 'ETH',
'quoteCurrencySymbol': 'BTC', 'minTradeSize': '0.01314872',
'precision': 8, 'status': 'ONLINE', 'createdAt':
'2015-08-14T09:02:24.817Z'}, {'symbol': 'BTC-USDT',
'baseCurrencySymbol': 'BTC', 'quoteCurrencySymbol': 'USDT',
'minTradeSize': '0.00025334', 'precision': 8, 'status': 'ONLINE',
'createdAt': '2015-12-11T06:31:40.633Z', 'notice': ''}, {'symbol':
'BTC-USD', 'baseCurrencySymbol': 'BTC', 'quoteCurrencySymbol':
'USD', 'minTradeSize': '0.00025427', 'precision': 3, 'status':
'ONLINE', 'createdAt': '2018-05-31T13:24:40.77Z'}, {'symbol':
'ETH-USDT', 'baseCurrencySymbol': 'ETH', 'quoteCurrencySymbol':
'USDT', 'minTradeSize': '0.01334966', 'precision': 8, 'status':
'ONLINE', 'createdAt': '2017-04-20T17:26:37.647Z', 'notice': ''}]
MARKETS_TICKERS = [{'symbol': 'ETH-BTC', 'lastTradeRate': '0.02739396',
'bidRate': '0.02740726', 'askRate': '0.02741416'}, {'symbol':
'ETH-USDT', 'lastTradeRate': '267.26100000', 'bidRate':
'266.96646649', 'askRate': '267.22586512'}, {'symbol': 'BTC-USDT',
'lastTradeRate': '9758.81200003', 'bidRate': '9760.51000000',
'askRate': '9765.82533436'}, {'symbol': 'BTC-USD', 'lastTradeRate':
'9770.73200000', 'bidRate': '9767.64400000', 'askRate':
'9770.73200000'}]
BALANCES = [{'currencySymbol': 'BTC', 'total': '0.00279886',
'available': '0.00279886'}, {'currencySymbol': 'BTXCRD', 'total':
'1031.33915356', 'available': '1031.33915356'}, {'currencySymbol':
'ETH', 'total': '0.24010276', 'available': '0.24010276'}, {
'currencySymbol': 'USDT', 'total': '76.30113330', 'available':
'67.48856276'}, {'currencySymbol': 'XZC', 'total': '4.99205590',
'available': '4.99205590'}, {'currencySymbol': 'ZRX', 'total':
'0.00000000', 'available': '0.00000000'}]
FILLED_BUY_LIMIT_ORDER = {'id': 'd7850281-0440-4478-879f-248499b2134d',
'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',
'quantity': '0.06000000', 'limit': '268.09208274', 'timeInForce':
'GOOD_TIL_CANCELLED', 'fillQuantity': '0.06000000', 'commission':
'0.01333791', 'proceeds': '5.33516582', 'status': 'CLOSED',
'createdAt': '2020-02-24T09:38:13.1Z', 'updatedAt':
'2020-02-24T09:38:13.1Z', 'closedAt': '2020-02-24T09:38:13.1Z'}
OPEN_BUY_LIMIT_ORDER = {'id': '615aa7de-3ff9-486d-98d7-2d37aca212c9',
'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',
'quantity': '0.06000000', 'limit': '205.64319999', 'timeInForce':
'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':
'0.00000000', 'proceeds': '0.00000000', 'status': 'OPEN',
'createdAt': '2020-02-25T11:13:32.12Z', 'updatedAt':
'2020-02-25T11:13:32.12Z'}
CANCEL_ORDER = {'id': '615aa7de-3ff9-486d-98d7-2d37aca212c9',
'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',
'quantity': '0.06000000', 'limit': '205.64319999', 'timeInForce':
'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':
'0.00000000', 'proceeds': '0.00000000', 'status': 'CLOSED',
'createdAt': '2020-02-25T11:13:32.12Z', 'updatedAt':
'2020-02-25T11:13:33.63Z', 'closedAt': '2020-02-25T11:13:33.63Z'}
ORDERS_OPEN = [{'id': '9854dc2a-0762-408d-922f-882f4359c517',
'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',
'quantity': '0.03000000', 'limit': '134.75247524', 'timeInForce':
'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':
'0.00000000', 'proceeds': '0.00000000', 'status': 'OPEN',
'createdAt': '2020-01-10T10:25:25.13Z', 'updatedAt':
'2020-01-10T10:25:25.13Z'}, {'id':
'261d9158-c9c1-40a6-bad8-4b447a471d8f', 'marketSymbol': 'ETH-USDT',
'direction': 'BUY', 'type': 'LIMIT', 'quantity': '0.03000000',
'limit': '158.26732673', 'timeInForce': 'GOOD_TIL_CANCELLED',
'fillQuantity': '0.00000000', 'commission': '0.00000000',
'proceeds': '0.00000000', 'status': 'OPEN', 'createdAt':
'2020-01-26T02:58:14.19Z', 'updatedAt': '2020-01-26T02:58:14.19Z'}]
WS_AFTER_BUY_2 = {'event_type': 'uO', 'content': {'w':
'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 8, 'TY': 2, 'o': {'U':
'00000000-0000-0000-0000-000000000000', 'I': 4551095126, 'OU':
'd67c837e-56c5-41e2-b65b-fe590eb06eaf', 'E': 'ETH-USDT', 'OT':
'LIMIT_BUY', 'Q': 0.06, 'q': 0.0, 'X': 269.05759499, 'n':
0.01338594, 'P': 5.35437999, 'PU': 267.7189995, 'Y': 1582540341630,
'C': 1582540341630, 'i': False, 'CI': False, 'K': False, 'k': False,
'J': None, 'j': None, 'u': 1582540341630, 'PassthroughUuid': None}},
'error': None, 'time': '2020-02-24T10:32:21'}
WS_AFTER_BUY_1 = {'event_type': 'uO', 'content': {'w':
'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 13, 'TY': 0, 'o': {'U':
'00000000-0000-0000-0000-000000000000', 'I': 4564385840, 'OU':
'615aa7de-3ff9-486d-98d7-2d37aca212c9', 'E': 'ETH-USDT', 'OT':
'LIMIT_BUY', 'Q': 0.06, 'q': 0.06, 'X': 205.64319999, 'n': 0.0, 'P':
0.0, 'PU': 0.0, 'Y': 1582629212120, 'C': None, 'i': True, 'CI':
False, 'K': False, 'k': False, 'J': None, 'j': None, 'u':
1582629212120, 'PassthroughUuid': None}}, 'error': None, 'time':
'2020-02-25T11:13:32'}
WS_AFTER_SELL_2 = {'event_type': 'uO', 'content': {'w':
'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 10, 'TY': 2, 'o': {'U':
'00000000-0000-0000-0000-000000000000', 'I': 4279414326, 'OU':
'447256cc-9335-41f3-bec9-7392804d30cd', 'E': 'ETH-USDT', 'OT':
'LIMIT_SELL', 'Q': 0.06, 'q': 0.0, 'X': 257.72689, 'n': 0.0129511,
'P': 5.18044, 'PU': 259.022, 'Y': 1582627522640, 'C': 1582627522640,
'i': False, 'CI': False, 'K': False, 'k': False, 'J': None, 'j':
None, 'u': 1582627522640, 'PassthroughUuid': None}}, 'error': None,
'time': '2020-02-25T10:45:22'}
WS_ORDER_BOOK_SNAPSHOT = {'nonce': 115097, 'type': 'snapshot',
'results': {'M': 'ETH-USDT', 'N': 115097, 'Z': [{'Q': 3.7876, 'R':
261.805}, {'Q': 3.99999998, 'R': 261.80200001}, {'Q': 20.92267278,
'R': 261.75575521}], 'S': [{'Q': 3.618, 'R': 262.06976758}, {'Q':
1.2, 'R': 262.06976759}, {'Q': 4.0241, 'R': 262.07}], 'f': [{'I':
53304378, 'T': 1582604545290, 'Q': 1.75736397, 'P': 261.83, 't':
460.1306082651, 'F': 'FILL', 'OT': 'SELL', 'U':
'a0de16e3-6f6d-43f0-b9ea-a8c1f9835223'}, {'I': 53304377, 'T':
1582604544910, 'Q': 0.42976603, 'P': 261.83, 't': 112.5256396349,
'F': 'FILL', 'OT': 'SELL', 'U':
'dc723d5e-2af5-4010-9eb2-a915f050015e'}]}}
<|reserved_special_token_1|>
class FixtureBittrex:
PING = {"serverTime": 1582535502000}
MARKETS = [
{
"symbol": "ETH-BTC", "baseCurrencySymbol": "ETH", "quoteCurrencySymbol": "BTC",
"minTradeSize": "0.01314872", "precision": 8,
"status": "ONLINE", "createdAt": "2015-08-14T09:02:24.817Z"},
{
"symbol": "BTC-USDT", "baseCurrencySymbol": "BTC", "quoteCurrencySymbol": "USDT",
"minTradeSize": "0.00025334", "precision": 8,
"status": "ONLINE", "createdAt": "2015-12-11T06:31:40.633Z", "notice": ""},
{
"symbol": "BTC-USD", "baseCurrencySymbol": "BTC", "quoteCurrencySymbol": "USD",
"minTradeSize": "0.00025427", "precision": 3,
"status": "ONLINE", "createdAt": "2018-05-31T13:24:40.77Z"},
{
"symbol": "ETH-USDT", "baseCurrencySymbol": "ETH", "quoteCurrencySymbol": "USDT",
"minTradeSize": "0.01334966", "precision": 8,
"status": "ONLINE", "createdAt": "2017-04-20T17:26:37.647Z", "notice": ""}
]
MARKETS_TICKERS = [
{
"symbol": "ETH-BTC", "lastTradeRate": "0.02739396",
"bidRate": "0.02740726", "askRate": "0.02741416"},
{
"symbol": "ETH-USDT", "lastTradeRate": "267.26100000",
"bidRate": "266.96646649", "askRate": "267.22586512"},
{
"symbol": "BTC-USDT", "lastTradeRate": "9758.81200003",
"bidRate": "9760.51000000", "askRate": "9765.82533436"},
{
"symbol": "BTC-USD", "lastTradeRate": "9770.73200000",
"bidRate": "9767.64400000", "askRate": "9770.73200000"}
]
# General User Info
BALANCES = [{"currencySymbol": "BTC", "total": "0.00279886", "available": "0.00279886"},
{"currencySymbol": "BTXCRD", "total": "1031.33915356", "available": "1031.33915356"},
{"currencySymbol": "ETH", "total": "0.24010276", "available": "0.24010276"},
{"currencySymbol": "USDT", "total": "76.30113330", "available": "67.48856276"},
{"currencySymbol": "XZC", "total": "4.99205590", "available": "4.99205590"},
{"currencySymbol": "ZRX", "total": "0.00000000", "available": "0.00000000"}]
# User Trade Info
FILLED_BUY_LIMIT_ORDER = {
"id": "d7850281-0440-4478-879f-248499b2134d", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "268.09208274",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.06000000", "commission": "0.01333791",
"proceeds": "5.33516582", "status": "CLOSED", "createdAt": "2020-02-24T09:38:13.1Z",
"updatedAt": "2020-02-24T09:38:13.1Z", "closedAt": "2020-02-24T09:38:13.1Z"}
OPEN_BUY_LIMIT_ORDER = {
"id": "615aa7de-3ff9-486d-98d7-2d37aca212c9", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "205.64319999",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.00000000", "commission": "0.00000000",
"proceeds": "0.00000000", "status": "OPEN", "createdAt": "2020-02-25T11:13:32.12Z",
"updatedAt": "2020-02-25T11:13:32.12Z"}
CANCEL_ORDER = {
"id": "615aa7de-3ff9-486d-98d7-2d37aca212c9", "marketSymbol": "ETH-USDT", "direction": "BUY",
"type": "LIMIT", "quantity": "0.06000000", "limit": "205.64319999",
"timeInForce": "GOOD_TIL_CANCELLED", "fillQuantity": "0.00000000", "commission": "0.00000000",
"proceeds": "0.00000000", "status": "CLOSED", "createdAt": "2020-02-25T11:13:32.12Z",
"updatedAt": "2020-02-25T11:13:33.63Z", "closedAt": "2020-02-25T11:13:33.63Z"}
ORDERS_OPEN = [
{
"id": "9854dc2a-0762-408d-922f-882f4359c517", "marketSymbol": "ETH-USDT", "direction": "BUY", "type": "LIMIT",
"quantity": "0.03000000", "limit": "134.75247524", "timeInForce": "GOOD_TIL_CANCELLED",
"fillQuantity": "0.00000000", "commission": "0.00000000", "proceeds": "0.00000000", "status": "OPEN",
"createdAt": "2020-01-10T10:25:25.13Z", "updatedAt": "2020-01-10T10:25:25.13Z"},
{
"id": "261d9158-c9c1-40a6-bad8-4b447a471d8f", "marketSymbol": "ETH-USDT", "direction": "BUY", "type": "LIMIT",
"quantity": "0.03000000", "limit": "158.26732673", "timeInForce": "GOOD_TIL_CANCELLED",
"fillQuantity": "0.00000000", "commission": "0.00000000", "proceeds": "0.00000000", "status": "OPEN",
"createdAt": "2020-01-26T02:58:14.19Z", "updatedAt": "2020-01-26T02:58:14.19Z"}
]
WS_AFTER_BUY_2 = {
'event_type': 'uO', 'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 8, 'TY': 2,
'o': {
'U': '00000000-0000-0000-0000-000000000000',
'I': 4551095126,
'OU': 'd67c837e-56c5-41e2-b65b-fe590eb06eaf',
'E': 'ETH-USDT', 'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.0,
'X': 269.05759499, 'n': 0.01338594, 'P': 5.35437999,
'PU': 267.7189995, 'Y': 1582540341630,
'C': 1582540341630, 'i': False, 'CI': False, 'K': False,
'k': False, 'J': None, 'j': None, 'u': 1582540341630,
'PassthroughUuid': None}},
'error': None,
'time': '2020-02-24T10:32:21'
}
WS_AFTER_BUY_1 = {
'event_type': 'uO', 'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 13, 'TY': 0,
'o': {
'U': '00000000-0000-0000-0000-000000000000', 'I': 4564385840,
'OU': '615aa7de-3ff9-486d-98d7-2d37aca212c9', 'E': 'ETH-USDT',
'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.06, 'X': 205.64319999, 'n': 0.0,
'P': 0.0, 'PU': 0.0, 'Y': 1582629212120, 'C': None, 'i': True,
'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,
'u': 1582629212120, 'PassthroughUuid': None}},
'error': None,
'time': '2020-02-25T11:13:32'
}
WS_AFTER_SELL_2 = {
'event_type': 'uO',
'content': {
'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 10, 'TY': 2,
'o': {
'U': '00000000-0000-0000-0000-000000000000', 'I': 4279414326,
'OU': '447256cc-9335-41f3-bec9-7392804d30cd', 'E': 'ETH-USDT',
'OT': 'LIMIT_SELL', 'Q': 0.06, 'q': 0.0, 'X': 257.72689, 'n': 0.0129511,
'P': 5.18044, 'PU': 259.022, 'Y': 1582627522640, 'C': 1582627522640,
'i': False, 'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,
'u': 1582627522640, 'PassthroughUuid': None}},
'error': None,
'time': '2020-02-25T10:45:22'}
WS_ORDER_BOOK_SNAPSHOT = {
'nonce': 115097,
'type': 'snapshot',
'results': {
'M': 'ETH-USDT', 'N': 115097,
'Z': [
{'Q': 3.7876, 'R': 261.805},
{'Q': 3.99999998, 'R': 261.80200001},
{'Q': 20.92267278, 'R': 261.75575521}],
'S': [
{'Q': 3.618, 'R': 262.06976758},
{'Q': 1.2, 'R': 262.06976759},
{'Q': 4.0241, 'R': 262.07}],
'f': [
{'I': 53304378, 'T': 1582604545290, 'Q': 1.75736397, 'P': 261.83, 't': 460.1306082651,
'F': 'FILL', 'OT': 'SELL', 'U': 'a0de16e3-6f6d-43f0-b9ea-a8c1f9835223'},
{'I': 53304377, 'T': 1582604544910, 'Q': 0.42976603, 'P': 261.83, 't': 112.5256396349,
'F': 'FILL', 'OT': 'SELL', 'U': 'dc723d5e-2af5-4010-9eb2-a915f050015e'}]}
}
|
flexible
|
{
"blob_id": "eba8e2bda786760898c10d3e75620144973d6236",
"index": 9555,
"step-1": "<mask token>\n",
"step-2": "class FixtureBittrex:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "class FixtureBittrex:\n PING = {'serverTime': 1582535502000}\n MARKETS = [{'symbol': 'ETH-BTC', 'baseCurrencySymbol': 'ETH',\n 'quoteCurrencySymbol': 'BTC', 'minTradeSize': '0.01314872',\n 'precision': 8, 'status': 'ONLINE', 'createdAt':\n '2015-08-14T09:02:24.817Z'}, {'symbol': 'BTC-USDT',\n 'baseCurrencySymbol': 'BTC', 'quoteCurrencySymbol': 'USDT',\n 'minTradeSize': '0.00025334', 'precision': 8, 'status': 'ONLINE',\n 'createdAt': '2015-12-11T06:31:40.633Z', 'notice': ''}, {'symbol':\n 'BTC-USD', 'baseCurrencySymbol': 'BTC', 'quoteCurrencySymbol':\n 'USD', 'minTradeSize': '0.00025427', 'precision': 3, 'status':\n 'ONLINE', 'createdAt': '2018-05-31T13:24:40.77Z'}, {'symbol':\n 'ETH-USDT', 'baseCurrencySymbol': 'ETH', 'quoteCurrencySymbol':\n 'USDT', 'minTradeSize': '0.01334966', 'precision': 8, 'status':\n 'ONLINE', 'createdAt': '2017-04-20T17:26:37.647Z', 'notice': ''}]\n MARKETS_TICKERS = [{'symbol': 'ETH-BTC', 'lastTradeRate': '0.02739396',\n 'bidRate': '0.02740726', 'askRate': '0.02741416'}, {'symbol':\n 'ETH-USDT', 'lastTradeRate': '267.26100000', 'bidRate':\n '266.96646649', 'askRate': '267.22586512'}, {'symbol': 'BTC-USDT',\n 'lastTradeRate': '9758.81200003', 'bidRate': '9760.51000000',\n 'askRate': '9765.82533436'}, {'symbol': 'BTC-USD', 'lastTradeRate':\n '9770.73200000', 'bidRate': '9767.64400000', 'askRate':\n '9770.73200000'}]\n BALANCES = [{'currencySymbol': 'BTC', 'total': '0.00279886',\n 'available': '0.00279886'}, {'currencySymbol': 'BTXCRD', 'total':\n '1031.33915356', 'available': '1031.33915356'}, {'currencySymbol':\n 'ETH', 'total': '0.24010276', 'available': '0.24010276'}, {\n 'currencySymbol': 'USDT', 'total': '76.30113330', 'available':\n '67.48856276'}, {'currencySymbol': 'XZC', 'total': '4.99205590',\n 'available': '4.99205590'}, {'currencySymbol': 'ZRX', 'total':\n '0.00000000', 'available': '0.00000000'}]\n FILLED_BUY_LIMIT_ORDER = {'id': 'd7850281-0440-4478-879f-248499b2134d',\n 'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 
'type': 'LIMIT',\n 'quantity': '0.06000000', 'limit': '268.09208274', 'timeInForce':\n 'GOOD_TIL_CANCELLED', 'fillQuantity': '0.06000000', 'commission':\n '0.01333791', 'proceeds': '5.33516582', 'status': 'CLOSED',\n 'createdAt': '2020-02-24T09:38:13.1Z', 'updatedAt':\n '2020-02-24T09:38:13.1Z', 'closedAt': '2020-02-24T09:38:13.1Z'}\n OPEN_BUY_LIMIT_ORDER = {'id': '615aa7de-3ff9-486d-98d7-2d37aca212c9',\n 'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',\n 'quantity': '0.06000000', 'limit': '205.64319999', 'timeInForce':\n 'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':\n '0.00000000', 'proceeds': '0.00000000', 'status': 'OPEN',\n 'createdAt': '2020-02-25T11:13:32.12Z', 'updatedAt':\n '2020-02-25T11:13:32.12Z'}\n CANCEL_ORDER = {'id': '615aa7de-3ff9-486d-98d7-2d37aca212c9',\n 'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',\n 'quantity': '0.06000000', 'limit': '205.64319999', 'timeInForce':\n 'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':\n '0.00000000', 'proceeds': '0.00000000', 'status': 'CLOSED',\n 'createdAt': '2020-02-25T11:13:32.12Z', 'updatedAt':\n '2020-02-25T11:13:33.63Z', 'closedAt': '2020-02-25T11:13:33.63Z'}\n ORDERS_OPEN = [{'id': '9854dc2a-0762-408d-922f-882f4359c517',\n 'marketSymbol': 'ETH-USDT', 'direction': 'BUY', 'type': 'LIMIT',\n 'quantity': '0.03000000', 'limit': '134.75247524', 'timeInForce':\n 'GOOD_TIL_CANCELLED', 'fillQuantity': '0.00000000', 'commission':\n '0.00000000', 'proceeds': '0.00000000', 'status': 'OPEN',\n 'createdAt': '2020-01-10T10:25:25.13Z', 'updatedAt':\n '2020-01-10T10:25:25.13Z'}, {'id':\n '261d9158-c9c1-40a6-bad8-4b447a471d8f', 'marketSymbol': 'ETH-USDT',\n 'direction': 'BUY', 'type': 'LIMIT', 'quantity': '0.03000000',\n 'limit': '158.26732673', 'timeInForce': 'GOOD_TIL_CANCELLED',\n 'fillQuantity': '0.00000000', 'commission': '0.00000000',\n 'proceeds': '0.00000000', 'status': 'OPEN', 'createdAt':\n '2020-01-26T02:58:14.19Z', 'updatedAt': 
'2020-01-26T02:58:14.19Z'}]\n WS_AFTER_BUY_2 = {'event_type': 'uO', 'content': {'w':\n 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 8, 'TY': 2, 'o': {'U':\n '00000000-0000-0000-0000-000000000000', 'I': 4551095126, 'OU':\n 'd67c837e-56c5-41e2-b65b-fe590eb06eaf', 'E': 'ETH-USDT', 'OT':\n 'LIMIT_BUY', 'Q': 0.06, 'q': 0.0, 'X': 269.05759499, 'n': \n 0.01338594, 'P': 5.35437999, 'PU': 267.7189995, 'Y': 1582540341630,\n 'C': 1582540341630, 'i': False, 'CI': False, 'K': False, 'k': False,\n 'J': None, 'j': None, 'u': 1582540341630, 'PassthroughUuid': None}},\n 'error': None, 'time': '2020-02-24T10:32:21'}\n WS_AFTER_BUY_1 = {'event_type': 'uO', 'content': {'w':\n 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 13, 'TY': 0, 'o': {'U':\n '00000000-0000-0000-0000-000000000000', 'I': 4564385840, 'OU':\n '615aa7de-3ff9-486d-98d7-2d37aca212c9', 'E': 'ETH-USDT', 'OT':\n 'LIMIT_BUY', 'Q': 0.06, 'q': 0.06, 'X': 205.64319999, 'n': 0.0, 'P':\n 0.0, 'PU': 0.0, 'Y': 1582629212120, 'C': None, 'i': True, 'CI': \n False, 'K': False, 'k': False, 'J': None, 'j': None, 'u': \n 1582629212120, 'PassthroughUuid': None}}, 'error': None, 'time':\n '2020-02-25T11:13:32'}\n WS_AFTER_SELL_2 = {'event_type': 'uO', 'content': {'w':\n 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 10, 'TY': 2, 'o': {'U':\n '00000000-0000-0000-0000-000000000000', 'I': 4279414326, 'OU':\n '447256cc-9335-41f3-bec9-7392804d30cd', 'E': 'ETH-USDT', 'OT':\n 'LIMIT_SELL', 'Q': 0.06, 'q': 0.0, 'X': 257.72689, 'n': 0.0129511,\n 'P': 5.18044, 'PU': 259.022, 'Y': 1582627522640, 'C': 1582627522640,\n 'i': False, 'CI': False, 'K': False, 'k': False, 'J': None, 'j':\n None, 'u': 1582627522640, 'PassthroughUuid': None}}, 'error': None,\n 'time': '2020-02-25T10:45:22'}\n WS_ORDER_BOOK_SNAPSHOT = {'nonce': 115097, 'type': 'snapshot',\n 'results': {'M': 'ETH-USDT', 'N': 115097, 'Z': [{'Q': 3.7876, 'R': \n 261.805}, {'Q': 3.99999998, 'R': 261.80200001}, {'Q': 20.92267278,\n 'R': 261.75575521}], 'S': [{'Q': 3.618, 'R': 262.06976758}, {'Q': \n 
1.2, 'R': 262.06976759}, {'Q': 4.0241, 'R': 262.07}], 'f': [{'I': \n 53304378, 'T': 1582604545290, 'Q': 1.75736397, 'P': 261.83, 't': \n 460.1306082651, 'F': 'FILL', 'OT': 'SELL', 'U':\n 'a0de16e3-6f6d-43f0-b9ea-a8c1f9835223'}, {'I': 53304377, 'T': \n 1582604544910, 'Q': 0.42976603, 'P': 261.83, 't': 112.5256396349,\n 'F': 'FILL', 'OT': 'SELL', 'U':\n 'dc723d5e-2af5-4010-9eb2-a915f050015e'}]}}\n",
"step-4": "class FixtureBittrex:\n PING = {\"serverTime\": 1582535502000}\n\n MARKETS = [\n {\n \"symbol\": \"ETH-BTC\", \"baseCurrencySymbol\": \"ETH\", \"quoteCurrencySymbol\": \"BTC\",\n \"minTradeSize\": \"0.01314872\", \"precision\": 8,\n \"status\": \"ONLINE\", \"createdAt\": \"2015-08-14T09:02:24.817Z\"},\n {\n \"symbol\": \"BTC-USDT\", \"baseCurrencySymbol\": \"BTC\", \"quoteCurrencySymbol\": \"USDT\",\n \"minTradeSize\": \"0.00025334\", \"precision\": 8,\n \"status\": \"ONLINE\", \"createdAt\": \"2015-12-11T06:31:40.633Z\", \"notice\": \"\"},\n {\n \"symbol\": \"BTC-USD\", \"baseCurrencySymbol\": \"BTC\", \"quoteCurrencySymbol\": \"USD\",\n \"minTradeSize\": \"0.00025427\", \"precision\": 3,\n \"status\": \"ONLINE\", \"createdAt\": \"2018-05-31T13:24:40.77Z\"},\n {\n \"symbol\": \"ETH-USDT\", \"baseCurrencySymbol\": \"ETH\", \"quoteCurrencySymbol\": \"USDT\",\n \"minTradeSize\": \"0.01334966\", \"precision\": 8,\n \"status\": \"ONLINE\", \"createdAt\": \"2017-04-20T17:26:37.647Z\", \"notice\": \"\"}\n ]\n\n MARKETS_TICKERS = [\n {\n \"symbol\": \"ETH-BTC\", \"lastTradeRate\": \"0.02739396\",\n \"bidRate\": \"0.02740726\", \"askRate\": \"0.02741416\"},\n {\n \"symbol\": \"ETH-USDT\", \"lastTradeRate\": \"267.26100000\",\n \"bidRate\": \"266.96646649\", \"askRate\": \"267.22586512\"},\n {\n \"symbol\": \"BTC-USDT\", \"lastTradeRate\": \"9758.81200003\",\n \"bidRate\": \"9760.51000000\", \"askRate\": \"9765.82533436\"},\n {\n \"symbol\": \"BTC-USD\", \"lastTradeRate\": \"9770.73200000\",\n \"bidRate\": \"9767.64400000\", \"askRate\": \"9770.73200000\"}\n ]\n\n # General User Info\n BALANCES = [{\"currencySymbol\": \"BTC\", \"total\": \"0.00279886\", \"available\": \"0.00279886\"},\n {\"currencySymbol\": \"BTXCRD\", \"total\": \"1031.33915356\", \"available\": \"1031.33915356\"},\n {\"currencySymbol\": \"ETH\", \"total\": \"0.24010276\", \"available\": \"0.24010276\"},\n {\"currencySymbol\": \"USDT\", \"total\": \"76.30113330\", \"available\": 
\"67.48856276\"},\n {\"currencySymbol\": \"XZC\", \"total\": \"4.99205590\", \"available\": \"4.99205590\"},\n {\"currencySymbol\": \"ZRX\", \"total\": \"0.00000000\", \"available\": \"0.00000000\"}]\n\n # User Trade Info\n FILLED_BUY_LIMIT_ORDER = {\n \"id\": \"d7850281-0440-4478-879f-248499b2134d\", \"marketSymbol\": \"ETH-USDT\", \"direction\": \"BUY\",\n \"type\": \"LIMIT\", \"quantity\": \"0.06000000\", \"limit\": \"268.09208274\",\n \"timeInForce\": \"GOOD_TIL_CANCELLED\", \"fillQuantity\": \"0.06000000\", \"commission\": \"0.01333791\",\n \"proceeds\": \"5.33516582\", \"status\": \"CLOSED\", \"createdAt\": \"2020-02-24T09:38:13.1Z\",\n \"updatedAt\": \"2020-02-24T09:38:13.1Z\", \"closedAt\": \"2020-02-24T09:38:13.1Z\"}\n\n OPEN_BUY_LIMIT_ORDER = {\n \"id\": \"615aa7de-3ff9-486d-98d7-2d37aca212c9\", \"marketSymbol\": \"ETH-USDT\", \"direction\": \"BUY\",\n \"type\": \"LIMIT\", \"quantity\": \"0.06000000\", \"limit\": \"205.64319999\",\n \"timeInForce\": \"GOOD_TIL_CANCELLED\", \"fillQuantity\": \"0.00000000\", \"commission\": \"0.00000000\",\n \"proceeds\": \"0.00000000\", \"status\": \"OPEN\", \"createdAt\": \"2020-02-25T11:13:32.12Z\",\n \"updatedAt\": \"2020-02-25T11:13:32.12Z\"}\n\n CANCEL_ORDER = {\n \"id\": \"615aa7de-3ff9-486d-98d7-2d37aca212c9\", \"marketSymbol\": \"ETH-USDT\", \"direction\": \"BUY\",\n \"type\": \"LIMIT\", \"quantity\": \"0.06000000\", \"limit\": \"205.64319999\",\n \"timeInForce\": \"GOOD_TIL_CANCELLED\", \"fillQuantity\": \"0.00000000\", \"commission\": \"0.00000000\",\n \"proceeds\": \"0.00000000\", \"status\": \"CLOSED\", \"createdAt\": \"2020-02-25T11:13:32.12Z\",\n \"updatedAt\": \"2020-02-25T11:13:33.63Z\", \"closedAt\": \"2020-02-25T11:13:33.63Z\"}\n\n ORDERS_OPEN = [\n {\n \"id\": \"9854dc2a-0762-408d-922f-882f4359c517\", \"marketSymbol\": \"ETH-USDT\", \"direction\": \"BUY\", \"type\": \"LIMIT\",\n \"quantity\": \"0.03000000\", \"limit\": \"134.75247524\", \"timeInForce\": \"GOOD_TIL_CANCELLED\",\n \"fillQuantity\": 
\"0.00000000\", \"commission\": \"0.00000000\", \"proceeds\": \"0.00000000\", \"status\": \"OPEN\",\n \"createdAt\": \"2020-01-10T10:25:25.13Z\", \"updatedAt\": \"2020-01-10T10:25:25.13Z\"},\n {\n \"id\": \"261d9158-c9c1-40a6-bad8-4b447a471d8f\", \"marketSymbol\": \"ETH-USDT\", \"direction\": \"BUY\", \"type\": \"LIMIT\",\n \"quantity\": \"0.03000000\", \"limit\": \"158.26732673\", \"timeInForce\": \"GOOD_TIL_CANCELLED\",\n \"fillQuantity\": \"0.00000000\", \"commission\": \"0.00000000\", \"proceeds\": \"0.00000000\", \"status\": \"OPEN\",\n \"createdAt\": \"2020-01-26T02:58:14.19Z\", \"updatedAt\": \"2020-01-26T02:58:14.19Z\"}\n ]\n\n WS_AFTER_BUY_2 = {\n 'event_type': 'uO', 'content': {\n 'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 8, 'TY': 2,\n 'o': {\n 'U': '00000000-0000-0000-0000-000000000000',\n 'I': 4551095126,\n 'OU': 'd67c837e-56c5-41e2-b65b-fe590eb06eaf',\n 'E': 'ETH-USDT', 'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.0,\n 'X': 269.05759499, 'n': 0.01338594, 'P': 5.35437999,\n 'PU': 267.7189995, 'Y': 1582540341630,\n 'C': 1582540341630, 'i': False, 'CI': False, 'K': False,\n 'k': False, 'J': None, 'j': None, 'u': 1582540341630,\n 'PassthroughUuid': None}},\n 'error': None,\n 'time': '2020-02-24T10:32:21'\n }\n\n WS_AFTER_BUY_1 = {\n 'event_type': 'uO', 'content': {\n 'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 13, 'TY': 0,\n 'o': {\n 'U': '00000000-0000-0000-0000-000000000000', 'I': 4564385840,\n 'OU': '615aa7de-3ff9-486d-98d7-2d37aca212c9', 'E': 'ETH-USDT',\n 'OT': 'LIMIT_BUY', 'Q': 0.06, 'q': 0.06, 'X': 205.64319999, 'n': 0.0,\n 'P': 0.0, 'PU': 0.0, 'Y': 1582629212120, 'C': None, 'i': True,\n 'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,\n 'u': 1582629212120, 'PassthroughUuid': None}},\n 'error': None,\n 'time': '2020-02-25T11:13:32'\n }\n\n WS_AFTER_SELL_2 = {\n 'event_type': 'uO',\n 'content': {\n 'w': 'f8907116-4e24-4602-b691-d110b5ce1bf8', 'N': 10, 'TY': 2,\n 'o': {\n 'U': '00000000-0000-0000-0000-000000000000', 'I': 4279414326,\n 
'OU': '447256cc-9335-41f3-bec9-7392804d30cd', 'E': 'ETH-USDT',\n 'OT': 'LIMIT_SELL', 'Q': 0.06, 'q': 0.0, 'X': 257.72689, 'n': 0.0129511,\n 'P': 5.18044, 'PU': 259.022, 'Y': 1582627522640, 'C': 1582627522640,\n 'i': False, 'CI': False, 'K': False, 'k': False, 'J': None, 'j': None,\n 'u': 1582627522640, 'PassthroughUuid': None}},\n 'error': None,\n 'time': '2020-02-25T10:45:22'}\n\n WS_ORDER_BOOK_SNAPSHOT = {\n 'nonce': 115097,\n 'type': 'snapshot',\n 'results': {\n 'M': 'ETH-USDT', 'N': 115097,\n 'Z': [\n {'Q': 3.7876, 'R': 261.805},\n {'Q': 3.99999998, 'R': 261.80200001},\n {'Q': 20.92267278, 'R': 261.75575521}],\n 'S': [\n {'Q': 3.618, 'R': 262.06976758},\n {'Q': 1.2, 'R': 262.06976759},\n {'Q': 4.0241, 'R': 262.07}],\n 'f': [\n {'I': 53304378, 'T': 1582604545290, 'Q': 1.75736397, 'P': 261.83, 't': 460.1306082651,\n 'F': 'FILL', 'OT': 'SELL', 'U': 'a0de16e3-6f6d-43f0-b9ea-a8c1f9835223'},\n {'I': 53304377, 'T': 1582604544910, 'Q': 0.42976603, 'P': 261.83, 't': 112.5256396349,\n 'F': 'FILL', 'OT': 'SELL', 'U': 'dc723d5e-2af5-4010-9eb2-a915f050015e'}]}\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in packings_str.strip().splitlines():
line_items = line.split(' | ')
line_items = [s.strip() for s in line_items]
name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items
packings.append({'name': name, 'material': material, 'size': size, 'N':
int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':
float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),
'CV': float(CV)})
<|reserved_special_token_0|>
for i in range(len(packings)):
if packings[i]['name'] not in seen_packing_name:
seen_packing_name.add(packings[i]['name'])
export_packing_name.append(packings[i]['name'])
else:
pass
<|reserved_special_token_1|>
packings_str = """
Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450
Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430
Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440
Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400
Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300
Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337
Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360
Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320
Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336
Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341
Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410
Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446
Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380
Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368
Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333
Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336
Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341
Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345
Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333
Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333
Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303
NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410
NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366
NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425
NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322
Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402
Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345
Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408
Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390
Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342
Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369
Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465
Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464
Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379
Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450
Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398
Glitsch CMR ring | metal | 0.5" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495
Glitsch CMR ring | metal | 1.0" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0
Glitsch CMR ring | metal | 1.5"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0
Glitsch CMR ring | metal | 1.5" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0
TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389
Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412
Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210
VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405
VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420
Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459
Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296
Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257
Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331
Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390
Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377
Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302
Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0
Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0
Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370
Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232
Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387
DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354
DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326
Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385
Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0
Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0
Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270
Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327
Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390
Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422
Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412
Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0
Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167
"""
# Parse the " | "-separated packing table (one packing per row) into dicts.
# Row layout: name | material | size | N | a | eps | CS | CFl | Ch | CP0 | CL | CV
packings = []
for line in packings_str.strip().splitlines():
    name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = [
        field.strip() for field in line.split(' | ')]
    packings.append({
        'name': name,
        'material': material,
        'size': size,            # nominal size or vendor designation (kept as text)
        'N': int(N),             # 0 where the table gives no count
        'a': float(a),
        'eps': float(eps),
        'CS': float(CS),
        'CFl': float(CFl),
        'Ch': float(Ch),
        'CP0': float(CP0),
        'CL': float(CL),
        'CV': float(CV),
    })

# Collect unique packing names, preserving first-appearance order.
# (Iterate the list directly instead of indexing via range(len(...)).)
seen_packing_name = set()
export_packing_name = []
for packing in packings:
    if packing['name'] not in seen_packing_name:
        seen_packing_name.add(packing['name'])
        export_packing_name.append(packing['name'])
<|reserved_special_token_1|>
# author Dominik Capkovic
# contact: domcapkovic@gmail.com; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/
# GitHub: https://github.com/kilimetr
packings_str = '''
Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450
Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430
Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440
Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400
Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300
Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337
Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360
Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320
Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336
Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341
Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410
Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446
Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380
Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368
Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333
Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336
Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341
Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345
Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333
Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333
Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303
NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410
NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366
NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425
NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322
Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402
Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345
Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408
Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390
Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342
Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369
Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465
Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464
Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379
Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450
Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398
Glitsch CMR ring | metal | 0.5" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495
Glitsch CMR ring | metal | 1.0" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0
Glitsch CMR ring | metal | 1.5"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0
Glitsch CMR ring | metal | 1.5" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0
TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389
Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412
Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210
VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405
VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420
Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459
Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296
Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257
Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331
Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390
Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377
Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302
Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0
Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0
Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370
Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232
Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387
DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354
DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326
Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385
Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0
Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0
Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270
Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327
Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390
Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422
Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412
Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0
Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167
'''
# Parse the " | "-separated packing table (one packing per row) into dicts.
# Row layout: name | material | size | N | a | eps | CS | CFl | Ch | CP0 | CL | CV
packings = []
for line in packings_str.strip().splitlines():
    name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = [
        field.strip() for field in line.split(" | ")]
    packings.append({
        'name': name,
        'material': material,
        'size': size,            # nominal size or vendor designation (kept as text)
        'N': int(N),             # 0 where the table gives no count
        'a': float(a),
        'eps': float(eps),
        'CS': float(CS),
        'CFl': float(CFl),
        'Ch': float(Ch),
        'CP0': float(CP0),
        'CL': float(CL),
        'CV': float(CV),
    })

# EXPORTING PACKING NAME: unique names, in first-appearance order.
# (Iterate the list directly instead of indexing via range(len(...)).)
seen_packing_name = set()
export_packing_name = []
for packing in packings:
    if packing["name"] not in seen_packing_name:
        seen_packing_name.add(packing["name"])
        export_packing_name.append(packing["name"])

# # EXPORT PACKING SURFACEAREA
# export_packing_surfacearea = []
# for item in packings:
#     if item["name"] == type_packing:
#         export_packing_surfacearea.append(item["a"])
# print(export_packing_surfacearea)
|
flexible
|
{
"blob_id": "c4f656b96ddc86ab2575bd5ec646833cce95e6a9",
"index": 1717,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\n<mask token>\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n",
"step-3": "packings_str = \"\"\"\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 
13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR 
ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 
250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n\"\"\"\npackings = []\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\nseen_packing_name = set()\nexport_packing_name = []\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n",
"step-4": "# author Dominik Capkovic \n# contact: domcapkovic@gmail.com; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/\n# GitHub: https://github.com/kilimetr\n\n\npackings_str = '''\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 
0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 
| 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 
1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n'''\n\n\n\npackings = []\n\nfor line in packings_str.strip().splitlines():\n line_items = line.split(\" | \")\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({\n 'name': name,\n 'material': material,\n 'size': size,\n 'N': int(N),\n 'a': float(a),\n 'eps': float(eps),\n 'CS': float(CS),\n 'CFl': float(CFl),\n 'Ch': float(Ch),\n 'CP0': float(CP0),\n 'CL': float(CL),\n 'CV': float(CV),\n })\n\n\n\n# EXPORTING PACKING NAME\nseen_packing_name = set()\nexport_packing_name = []\n\nfor i in range(len(packings)):\n if packings[i][\"name\"] not in seen_packing_name:\n seen_packing_name.add(packings[i][\"name\"]) \n export_packing_name.append(packings[i][\"name\"])\n else:\n pass\n\n\n# # EXPORT PACKING SURFACEAREA\n# export_packing_surfacearea = []\n\n# for item in packings:\n# if item[\"name\"] == type_packing:\n# 
export_packing_surfacearea.append(item[\"a\"])\n\n# print(export_packing_surfacearea)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pymongo
import os,sys
import re
from db_User import *
from db_Event import *
class ClassRoom:
    """MongoDB-backed classroom record (id, display name, seats, key, events)."""

    # Connect to the local MongoDB server.
    # NOTE(review): the connection is opened at class-definition time, i.e. a
    # module-import side effect — kept as-is to preserve existing behaviour.
    __myclient = pymongo.MongoClient("mongodb://localhost:27017")
    # Database holding all MMKey collections.
    __mydb = __myclient["MMKeyDB"]
    # Collection of classroom documents.
    __mycol = __mydb["ClassRoom"]

    @staticmethod
    def Name2Id(room_id, name):
        """Validate and cross-derive a room id ("B1R101") and name ("教1-101").

        Returns (room_id, name) with the missing form filled in, or False
        when neither argument matches its expected pattern.

        FIX: declared as @staticmethod (it takes no self), and the regex
        patterns are raw strings so ``\\d`` is not an invalid string escape.
        """
        bool_n = bool(re.match(r"教\d{1}-\d{3}", name))
        bool_id = bool(re.match(r"B\d{1}R\d{3}", room_id))
        if not (bool_id or bool_n):
            return False
        elif bool_n:
            # Derive the id from the name: 教<building>-<room> -> B<building>R<room>
            room_id = "B" + name[1] + "R" + name[3:6]
        else:
            # Derive the name from the id: B<building>R<room> -> 教<building>-<room>
            name = "教" + room_id[1] + "-" + room_id[3:6]
        return room_id, name

    def __init__(self,
                 room_id="",
                 name="",
                 seats=0,
                 key_id="",
                 event=None):
        # BUG FIX: the original used ``event=[]`` — a mutable default shared
        # across every instance. Use None as sentinel and build a fresh list.
        event = [] if event is None else event
        if not ClassRoom.Name2Id(room_id, name):
            # Neither a valid id nor a valid name was supplied.
            self.WrongFlag = 1
        else:
            self.id, self.name = ClassRoom.Name2Id(room_id, name)
            self.seats = seats
            self.key_id = key_id
            self.event = event
            # Fill in any missing fields from the stored document.
            ClassRoom.PullClassroom(self)

    def PullClassroom(self):
        """Load missing fields from the stored document; False if not found."""
        result = self.__mycol.find_one({"_id": self.id})
        if result:
            # Keep explicitly supplied values; fall back to stored ones.
            self.name = self.name or result['name']
            self.seats = self.seats or result['seats']
            self.key_id = self.key_id or result['key_id']
            self.event = self.event or result['event']
            return self
        else:
            return False

    def TurnDict(self):
        """Return this record as a MongoDB document dict."""
        return {
            "_id": self.id,
            "name": self.name,
            "seats": self.seats,
            "key_id": self.key_id,
            "event": self.event,
        }

    def PushClassroom(self):
        """Insert or replace this record; return a status string."""
        mydict = self.TurnDict()
        if self.__mycol.find_one({"_id": self.id}):
            # FIX: Collection.update() is deprecated / removed in PyMongo 4;
            # replace_one() performs the same whole-document replacement.
            self.__mycol.replace_one({"_id": self.id}, mydict)
            return "Acc_Updated"
        else:
            self.__mycol.insert_one(mydict)  # upload a new document
            return "Acc_Created"

    def AllClassroom(self):
        """Return a cursor over every classroom document, or False."""
        cursor = self.__mycol.find()
        if cursor:
            return cursor
        else:
            return False

    # Delete the classroom record.
    def Delete(self):
        """Remove this classroom's stored document; return a status string."""
        # BUG FIX: the original deleted from ``User.mycol`` (the user
        # collection); a classroom document lives in the ClassRoom collection.
        self.__mycol.delete_one({"_id": self.id})
        return "Deleted"
if __name__ == '__main__':
index = ClassRoom().AllClassroom()
for i in index:
print(i)
|
normal
|
{
"blob_id": "8dae8a89d08bc522f9a5fdde8aeb9e322fafcbec",
"index": 3251,
"step-1": "<mask token>\n\n\nclass ClassRoom:\n <mask token>\n <mask token>\n <mask token>\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClassRoom:\n <mask token>\n <mask token>\n <mask token>\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClassRoom:\n __myclient = pymongo.MongoClient('mongodb://localhost:27017')\n __mydb = __myclient['MMKeyDB']\n __mycol = __mydb['ClassRoom']\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-4": "import pymongo\nimport os, sys\nimport re\nfrom db_User import *\nfrom db_Event import *\n\n\nclass ClassRoom:\n __myclient = pymongo.MongoClient('mongodb://localhost:27017')\n __mydb = __myclient['MMKeyDB']\n __mycol = __mydb['ClassRoom']\n\n def Name2Id(room_id, name):\n bool_n = bool(re.match('教\\\\d{1}-\\\\d{3}', name))\n bool_id = bool(re.match('B\\\\d{1}R\\\\d{3}', room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = 'B' + name[1] + 'R' + name[3:6]\n else:\n name = '教' + room_id[1] + '-' + room_id[3:6]\n return room_id, name\n\n def __init__(self, room_id='', name='', seats=0, key_id='', event=[]):\n if not ClassRoom.Name2Id(room_id, name):\n self.WrongFlag = 1\n else:\n self.id, self.name = ClassRoom.Name2Id(room_id, name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({'_id': self.id})\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id = self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {'_id': self.id, 'name': self.name, 'seats': self.seats,\n 'key_id': self.key_id, 'event': self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({'_id': self.id}):\n myquery = {'_id': self.id}\n self.__mycol.update(myquery, mydict)\n return 'Acc_Updated'\n else:\n self.__mycol.insert_one(mydict)\n return 'Acc_Created'\n\n def AllClassroom(self):\n cursor = self.__mycol.find()\n if cursor:\n return cursor\n else:\n return False\n\n def Delete(self):\n User.mycol.delete_one({'_id': self.id})\n return 'Deleted'\n\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-5": "import pymongo\nimport os,sys\nimport re\n\n\nfrom db_User import *\nfrom db_Event import *\n\n\nclass ClassRoom:\n # 链接本地客户端\n __myclient = pymongo.MongoClient(\"mongodb://localhost:27017\")\n # 创建数据库\n __mydb = __myclient[\"MMKeyDB\"]\n # 创建新的集合\n __mycol = __mydb[\"ClassRoom\"]\n\n # 判断是否输入id或是输入name,如果有输入则转译\n def Name2Id(room_id,name):\n bool_n = bool(re.match(\"教\\d{1}-\\d{3}\",name))\n bool_id = bool(re.match(\"B\\d{1}R\\d{3}\",room_id))\n if not (bool_id or bool_n):\n return False\n elif bool_n:\n room_id = \"B\" + name[1] + \"R\" + name[3:6]\n else:\n name = \"教\" + room_id[1] + \"-\" + room_id[3:6]\n\n return room_id,name\n\n def __init__(self,\n room_id = \"\",\n name = \"\",\n seats = 0,\n key_id = \"\",\n event = []):\n\n if not(ClassRoom.Name2Id(room_id,name)):\n self.WrongFlag = 1\n else:\n self.id,self.name = ClassRoom.Name2Id(room_id,name)\n self.seats = seats\n self.key_id = key_id\n self.event = event\n ClassRoom.PullClassroom(self)\n\n def PullClassroom(self):\n result = self.__mycol.find_one({ \"_id\": self.id })\n if result:\n self.name = self.name or result['name']\n self.seats = self.seats or result['seats']\n self.key_id= self.key_id or result['key_id']\n self.event = self.event or result['event']\n return self\n else:\n return False\n\n def TurnDict(self):\n mydict = {\n \"_id\" : self.id ,\n \"name\" : self.name,\n \"seats\" : self.seats,\n \"key_id\" : self.key_id,\n \"event\" : self.event}\n return mydict\n\n def PushClassroom(self):\n mydict = self.TurnDict()\n if self.__mycol.find_one({ \"_id\": self.id }):\n myquery = {\"_id\" : self.id}\n self.__mycol.update(myquery,mydict)\n return \"Acc_Updated\"\n else:\n self.__mycol.insert_one(mydict) # 上传新的document\n return \"Acc_Created\"\n \n def AllClassroom(self):\n cursor = self.__mycol.find()\n # __import__('ipdb').set_trace()\n if cursor:\n # index = []\n # for doc in cursor:\n # print(doc)\n # temp = [doc['_id'],doc['name'],doc['seats'],doc['event']]\n # index.append(temp)\n 
return cursor\n else:\n return False\n\n # 删除教室记录\n def Delete(self):\n User.mycol.delete_one({\"_id\": self.id})\n return \"Deleted\"\n\nif __name__ == '__main__':\n index = ClassRoom().AllClassroom()\n for i in index:\n print(i)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = 'sender', 'receiver', 'content', 'creation_date'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TwitSerializer(serializers.ModelSerializer):
class Meta:
model = Twit
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = 'sender', 'receiver', 'content', 'creation_date'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import Twit, Comment, Message
from django.contrib.auth.models import User
class TwitSerializer(serializers.ModelSerializer):
class Meta:
model = Twit
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = 'sender', 'receiver', 'content', 'creation_date'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import Twit, Comment, Message
from django.contrib.auth.models import User
class TwitSerializer(serializers.ModelSerializer):
class Meta:
model = Twit
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = ('sender', 'receiver', 'content', 'creation_date')
|
flexible
|
{
"blob_id": "536a67935527eb99bc0424613c9b931401db0b06",
"index": 6461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-3": "<mask token>\n\n\nclass TwitSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-4": "from rest_framework import serializers\nfrom .models import Twit, Comment, Message\nfrom django.contrib.auth.models import User\n\n\nclass TwitSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Message\n fields = 'sender', 'receiver', 'content', 'creation_date'\n",
"step-5": "from rest_framework import serializers\nfrom .models import Twit, Comment, Message\nfrom django.contrib.auth.models import User\n\nclass TwitSerializer(serializers.ModelSerializer):\n class Meta:\n model = Twit\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass MessageSerializer(serializers.ModelSerializer):\n class Meta:\n model = Message\n fields = ('sender', 'receiver', 'content', 'creation_date')\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('venue', '0001_initial')]
operations = [migrations.CreateModel(name='Images', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('image', versatileimagefield.fields.
VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (
'created_at', models.DateTimeField(help_text=
b'Date when category created.', verbose_name=b'Created At',
auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=
b'Date when category updated.', verbose_name=b'Updated At',
auto_now=True)), ('category', models.ForeignKey(related_name=
'images', blank=True, to='venue.Category', null=True))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models, migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [('venue', '0001_initial')]
operations = [migrations.CreateModel(name='Images', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('image', versatileimagefield.fields.
VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (
'created_at', models.DateTimeField(help_text=
b'Date when category created.', verbose_name=b'Created At',
auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=
b'Date when category updated.', verbose_name=b'Updated At',
auto_now=True)), ('category', models.ForeignKey(related_name=
'images', blank=True, to='venue.Category', null=True))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [
('venue', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image', versatileimagefield.fields.VersatileImageField(upload_to=b'images', verbose_name=b'Image')),
('created_at', models.DateTimeField(help_text=b'Date when category created.', verbose_name=b'Created At', auto_now_add=True)),
('updated_at', models.DateTimeField(help_text=b'Date when category updated.', verbose_name=b'Updated At', auto_now=True)),
('category', models.ForeignKey(related_name='images', blank=True, to='venue.Category', null=True)),
],
),
]
|
flexible
|
{
"blob_id": "09bf7460b2c928bf6e1346d9d1e2e1276540c080",
"index": 3099,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('venue', '0001_initial')]\n operations = [migrations.CreateModel(name='Images', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('image', versatileimagefield.fields.\n VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (\n 'created_at', models.DateTimeField(help_text=\n b'Date when category created.', verbose_name=b'Created At',\n auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=\n b'Date when category updated.', verbose_name=b'Updated At',\n auto_now=True)), ('category', models.ForeignKey(related_name=\n 'images', blank=True, to='venue.Category', null=True))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport versatileimagefield.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('venue', '0001_initial')]\n operations = [migrations.CreateModel(name='Images', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('image', versatileimagefield.fields.\n VersatileImageField(upload_to=b'images', verbose_name=b'Image')), (\n 'created_at', models.DateTimeField(help_text=\n b'Date when category created.', verbose_name=b'Created At',\n auto_now_add=True)), ('updated_at', models.DateTimeField(help_text=\n b'Date when category updated.', verbose_name=b'Updated At',\n auto_now=True)), ('category', models.ForeignKey(related_name=\n 'images', blank=True, to='venue.Category', null=True))])]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport versatileimagefield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('venue', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Images',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', versatileimagefield.fields.VersatileImageField(upload_to=b'images', verbose_name=b'Image')),\n ('created_at', models.DateTimeField(help_text=b'Date when category created.', verbose_name=b'Created At', auto_now_add=True)),\n ('updated_at', models.DateTimeField(help_text=b'Date when category updated.', verbose_name=b'Updated At', auto_now=True)),\n ('category', models.ForeignKey(related_name='images', blank=True, to='venue.Category', null=True)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# - *- coding: utf- 8 - *-
import RPi.GPIO as io
import time
import math
io.setmode(io.BOARD)  # physical (BOARD) pin numbering for all pins below
hz = 50               # PWM frequency (Hz)
dt = 1/hz             # PWM period (s)
kr = 48               # encoder cycles per output revolution (see countstorad)
enc_res = 0.01636246  # joint radians per quadrature count = 2*pi/(8*kr)
num_samples = 100     # moving-average window for the velocity estimates
# Keys whose label is a word rather than a single character.
special_words = ['BackSpace', 'Tab', 'Enter', 'Cap', 'Shift2', 'Ctrl1',
'WIN1', 'Alt1', 'Alt2', 'WIN2', 'MClick', 'Ctrl2', 'Shift1', '\\']
L1=0.115 # m  link 1 length
L2=0.064 # m  link 2 length
a1=0.018 # m  planar offset used in the kinematics (see invskinem)
# mm units converted at output of keypose
off = 56 # mm
a0=50+off # mm  base y-offset of the key grid
zz=24 # mm  spacing used for the modifier-key row
zs=float (19) #mm  key pitch along a row
yy=18 #mm  row-to-row pitch
k=float (0.048)  # per-row x-shear factor for the key grid (see keydic)
keydic={'Ctrl1':[0,-(a0+20),12],
'WIN1':[0,-(a0+20),12+zz],
'Alt1':[0,-(a0+20),12+2*zz],
' ':[0,-(a0+20),130],
'Alt2':[0,-(a0+20),130+12+2*zz],
'WIN2':[0,-(a0+20),142+3*zz],
'MClick':[0,-(a0+20),142+4*zz],
'Ctrl2':[0,-(a0+20),142+5*zz],
'Shift1':[float (k*yy),-(a0+20+yy),22],
'z':[float (k*yy),-(a0+20+yy),53],
'Z':[float (k*yy),-(a0+20+yy),53],
'x':[float (k*yy),-(a0+20+yy),53+zs],
'X':[float (k*yy),-(a0+20+yy),53+zs],
'c':[float (k*yy),-(a0+20+yy),53+2*zs],
'C':[float (k*yy),-(a0+20+yy),53+2*zs],
'v':[float (k*yy),-(a0+20+yy),53+3*zs],
'V':[float (k*yy),-(a0+20+yy),53+3*zs],
'b':[float (k*yy),-(a0+20+yy),53+4*zs],
'B':[float (k*yy),-(a0+20+yy),53+4*zs],
'n':[float (k*yy),-(a0+20+yy),53+5*zs],
'N':[float (k*yy),-(a0+20+yy),53+5*zs],
'm':[float (k*yy),-(a0+20+yy),53+6*zs],
'M':[float (k*yy),-(a0+20+yy),53+6*zs],
',':[float (k*yy),-(a0+20+yy),53+7*zs],
'.':[float (k*yy),-(a0+20+yy),53+8*zs],
'/':[float (k*yy),-(a0+20+yy),53+9*zs],
'Shift2':[float (k*yy),-(a0+20+yy),22+10*zs+43],
'Cap':[float (2*k*yy),-(a0+20+2*yy),17],
'a':[float (2*k*yy),-(a0+20+2*yy),43],
'A':[float (2*k*yy),-(a0+20+2*yy),43],
's':[float (2*k*yy),-(a0+20+2*yy),43+zs],
'S':[float (2*k*yy),-(a0+20+2*yy),43+zs],
'd':[float (2*k*yy),-(a0+20+2*yy),43+2*zs],
'D':[float (2*k*yy),-(a0+20+2*yy),43+2*zs],
'f':[float (2*k*yy),-(a0+20+2*yy),43+3*zs],
'F':[float (2*k*yy),-(a0+20+2*yy),43+3*zs],
'g':[float (2*k*yy),-(a0+20+2*yy),43+4*zs],
'G':[float (2*k*yy),-(a0+20+2*yy),43+4*zs],
'h':[float (2*k*yy),-(a0+20+2*yy),43+5*zs],
'H':[float (2*k*yy),-(a0+20+2*yy),43+5*zs],
'j':[float (2*k*yy),-(a0+20+2*yy),43+6*zs],
'J':[float (2*k*yy),-(a0+20+2*yy),43+6*zs],
'k':[float (2*k*yy),-(a0+20+2*yy),43+7*zs],
'K':[float (2*k*yy),-(a0+20+2*yy),43+7*zs],
'l':[float (2*k*yy),-(a0+20+2*yy),43+8*zs],
'L':[float (2*k*yy),-(a0+20+2*yy),43+8*zs],
';':[float (2*k*yy),-(a0+20+2*yy),43+9*zs],
'\\':[float (2*k*yy),-(a0+20+2*yy),43+10*zs],
'Enter':[float (2*k*yy),-(a0+20+2*yy),33+20+11*zs],
'Tab':[float (3*k*yy),-(a0+20+3*yy),15],
'q':[float (3*k*yy),-(a0+20+3*yy),39],
'Q':[float (3*k*yy),-(a0+20+3*yy),39],
'w':[float (3*k*yy),-(a0+20+3*yy),zs+39],
'W':[float (3*k*yy),-(a0+20+3*yy),zs+39],
'e':[float (3*k*yy),-(a0+20+3*yy),2*zs+39],
'E':[float (3*k*yy),-(a0+20+3*yy),2*zs+39],
'r':[float (3*k*yy),-(a0+20+3*yy),3*zs+39],
'R':[float (3*k*yy),-(a0+20+3*yy),3*zs+39],
't':[float (3*k*yy),-(a0+20+3*yy),4*zs+39],
'T':[float (3*k*yy),-(a0+20+3*yy),4*zs+39],
'y':[float (3*k*yy),-(a0+20+3*yy),5*zs+39],
'Y':[float (3*k*yy),-(a0+20+3*yy),5*zs+39],
'u':[float (3*k*yy),-(a0+20+3*yy),6*zs+39],
'U':[float (3*k*yy),-(a0+20+3*yy),6*zs+39],
'i':[float (3*k*yy),-(a0+20+3*yy),7*zs+39],
'I':[float (3*k*yy),-(a0+20+3*yy),7*zs+39],
'o':[float (3*k*yy),-(a0+20+3*yy),8*zs+39],
'O':[float (3*k*yy),-(a0+20+3*yy),8*zs+39],
'p':[float (3*k*yy),-(a0+20+3*yy),9*zs+39],
'P':[float (3*k*yy),-(a0+20+3*yy),9*zs+39],
'[':[float (3*k*yy),-(a0+20+3*yy),10*zs+39],
']':[float (3*k*yy),-(a0+20+3*yy),11*zs+39],
'\\':[float (3*k*yy),-(a0+20+3*yy),12*zs+30+14],
'`':[float (4*k*yy),-(a0+20+4*yy),float (zs/2)],
'1':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+zs)],
'2':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+2*zs)],
'3':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+3*zs)],
'4':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+4*zs)],
'5':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+5*zs)],
'6':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+6*zs)],
'7':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+7*zs)],
'8':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+8*zs)],
'9':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+9*zs)],
'0':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+10*zs)],
'-':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+11*zs)],
'=':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+12*zs)],
'BackSpace':[float (4*k*yy),-(a0+20+4*yy),13*zs+19]
}
# set parameters of robot (SI UNITS)
# NOTE(review): this block re-binds L1, L2 and k, shadowing the values
# assigned above — presumably intentional (same L1/L2; k becomes the motor
# constant) but worth confirming.
L1,L2=0.115,0.064
len_link1=0.07
len_link2=0.04#distances of centers of mass from joint axes
m_link1=0.005   # link masses — presumably kg, consistent with SI note above
m_link2=0.003
m_motor=0.06    # mass of the motor carried at the elbow joint
k=0.048         # motor constant: used as both torque (u/k) and back-EMF (k*w) factor
R=3.6           # motor winding resistance (ohms), used in V_d = R*u/k + k*w
V=5             # supply voltage (volts); duty = V_d/V*100
r_pulley=0.0181102/2 #unit meters
K_p,K_d=0.25,0.125   # PD gains for the joint 2/3 position controller
# ---- GPIO wiring: three PWM-driven motors and three quadrature encoders ----
# Each motor uses two PWM inputs (in1/in2) plus an enable pin.
# motor 1
m1_in1_pin = 12
m1_in2_pin = 16
m1_en_pin = 18
chan_list = [m1_en_pin, m1_in1_pin, m1_in2_pin]
io.setup(chan_list, io.OUT)
p1 = io.PWM(m1_in1_pin, hz)
p2 = io.PWM(m1_in2_pin, hz)
# motor 2
m2_in1_pin = 22
m2_in2_pin = 32
m2_en_pin = 36
chan_list = [m2_en_pin, m2_in1_pin, m2_in2_pin]
io.setup(chan_list, io.OUT)
p3 = io.PWM(m2_in1_pin, hz)
p4 = io.PWM(m2_in2_pin, hz)
# motor 3
m3_in1_pin = 38
m3_in2_pin = 40
m3_en_pin = 37
chan_list = [m3_en_pin, m3_in1_pin, m3_in2_pin]
io.setup(chan_list, io.OUT)
p5 = io.PWM(m3_in1_pin, hz)
p6 = io.PWM(m3_in2_pin, hz)
# sensor 1 (encoder 1, channel A)
en1_pin = 35
io.setup(en1_pin, io.IN, pull_up_down=io.PUD_UP)
# sensor 2 (encoder 1, channel B)
en2_pin = 33
io.setup(en2_pin, io.IN, pull_up_down=io.PUD_UP)
# encoder 1 state (updated from encoder1Callback)
encoder1_sensors = [en1_pin, en2_pin]
A1_old = 0              # last level seen on channel A
encoder1_count = 0      # accumulated quadrature count
A1_t1 = time.time()     # timestamp of the previous counted edge
vel1 = 0                # moving-average velocity estimate
vel1_vec = []           # recent per-edge velocity samples (newest first)
# sensor 3 (encoder 2, channel A)
en3_pin = 31
io.setup(en3_pin, io.IN, pull_up_down=io.PUD_UP)
# sensor 4 (encoder 2, channel B)
en4_pin = 29
io.setup(en4_pin, io.IN, pull_up_down=io.PUD_UP)
# encoder 2 state (updated from encoder2Callback)
encoder2_sensors = [en3_pin, en4_pin]
A2_old = 0
encoder2_count = 0
A2_t1 = time.time()
vel2 = 0
vel2_vec = []
# sensor 5 (encoder 3, channel A)
en5_pin = 15
io.setup(en5_pin, io.IN, pull_up_down=io.PUD_UP)
# sensor 6 (encoder 3, channel B)
en6_pin = 13
io.setup(en6_pin, io.IN, pull_up_down=io.PUD_UP)
# encoder 3 state (updated from encoder3Callback)
encoder3_sensors = [en5_pin, en6_pin]
A3_old = 0
encoder3_count = 0
A3_t1 = time.time()
vel3 = 0
vel3_vec = []
def clockwise(duty, pwm1, pwm2, en_pin):
    """Drive a motor "clockwise" at *duty* percent.

    Enables the driver, starts the first PWM channel at *duty*, waits one
    on-phase of the PWM period, then starts the complementary channel at
    the remaining duty.
    """
    io.output(en_pin, io.HIGH)
    on_phase = duty / 100 * dt
    pwm1.start(duty)
    time.sleep(on_phase)
    pwm2.start(100 - duty)
def counter_clockwise(duty, pwm1, pwm2, en_pin):
    """Drive a motor "counter-clockwise" at *duty* percent.

    Mirror of clockwise(): the second PWM channel leads at *duty* and the
    first follows at the complementary duty after one on-phase.
    """
    io.output(en_pin, io.HIGH)
    on_phase = duty / 100 * dt
    pwm2.start(duty)
    time.sleep(on_phase)
    pwm1.start(100 - duty)
def countstorad(count):
    """Convert raw quadrature counts to a joint-space angle in radians."""
    # 8 counted edges per encoder cycle, kr cycles per output revolution.
    return 2 * math.pi * count / 8 / kr
def radtocount(rad):
    """Convert a joint-space angle in radians to raw quadrature counts."""
    # Inverse of countstorad().
    return rad * kr * 8 / (2 * math.pi)
def initializeEncoders():
    """Zero encoders 2 and 3 at the home configuration (th2=0, th3=-pi/2).

    Counts are stored in raw quadrature units, matching what the interrupt
    callbacks accumulate and what countstorad() expects.
    """
    global encoder2_count, encoder3_count
    encoder2_count = 0
    # BUG FIX: the original assigned -pi/2 (radians) directly into the
    # count variable; every other consumer (countstorad, resetEncoders)
    # treats this variable as raw counts, so convert first.
    encoder3_count = radtocount(-math.pi/2)
def resetEncoders():
    """Reset all three encoder counts to the home configuration."""
    global encoder1_count, encoder2_count, encoder3_count
    encoder1_count = 0
    encoder2_count = 0
    # Joint 3 homes at -pi/2 rad, stored in raw quadrature counts.
    encoder3_count = radtocount(-math.pi / 2)
def encoder1Callback(channel):
    """GPIO edge-interrupt handler for encoder 1 (quadrature decoding).

    Fired on both edges of channel A.  The A/B phase relationship gives
    the direction; the count and a moving-average velocity estimate are
    updated in module globals.
    """
    global A1_old, encoder1_count, A1_t1, vel1, vel1_vec
    A1_t2 = time.time()
    # Sample both quadrature channels as 0/1.
    if io.input(channel):
        A = 1
    else:
        A = 0
    if io.input(encoder1_sensors[1]):
        B = 1
    else:
        B = 0
    # Only act on a genuine transition of channel A.
    if A != A1_old:
        # Phase relationship sets the direction: A != B counts up.
        if A != B:
            encoder1_count += 1
            vel1_vec.insert(0,enc_res/(A1_t2 - A1_t1))
        else:
            encoder1_count -= 1
            vel1_vec.insert(0,-enc_res/(A1_t2 - A1_t1))
        # Keep only the newest num_samples velocity samples.
        if len(vel1_vec) > num_samples:
            vel1_vec.pop()
        vel1 = sum(vel1_vec)/len(vel1_vec)
        A1_old = A
        A1_t1 = A1_t2
io.add_event_detect(en1_pin, io.BOTH, callback=encoder1Callback)
def encoder2Callback(channel):
    """GPIO edge-interrupt handler for encoder 2 (quadrature decoding).

    Same structure as encoder1Callback but with the count direction
    inverted (A != B counts down).
    """
    global A2_old, encoder2_count, A2_t1, vel2, vel2_vec
    A2_t2 = time.time()
    # Sample both quadrature channels as 0/1.
    if io.input(channel):
        A = 1
    else:
        A = 0
    if io.input(encoder2_sensors[1]):
        B = 1
    else:
        B = 0
    # Only act on a genuine transition of channel A.
    if A != A2_old:
        # Opposite sign convention to encoder 1: A != B counts down.
        if A != B:
            encoder2_count -= 1
            vel2_vec.insert(0,-enc_res/(A2_t2 - A2_t1))
        else:
            encoder2_count += 1
            vel2_vec.insert(0,enc_res/(A2_t2 - A2_t1))
        # Keep only the newest num_samples velocity samples.
        if len(vel2_vec) > num_samples:
            vel2_vec.pop()
        vel2 = sum(vel2_vec)/len(vel2_vec)
        A2_old = A
        A2_t1 = A2_t2
io.add_event_detect(en3_pin, io.BOTH, callback=encoder2Callback)
def encoder3Callback(channel):
    """GPIO edge-interrupt handler for encoder 3 (quadrature decoding).

    Same structure and sign convention as encoder2Callback.
    """
    global A3_old, encoder3_count, A3_t1, vel3, vel3_vec
    A3_t2 = time.time()
    # Sample both quadrature channels as 0/1.
    if io.input(channel):
        A = 1
    else:
        A = 0
    if io.input(encoder3_sensors[1]):
        B = 1
    else:
        B = 0
    # Only act on a genuine transition of channel A.
    if A != A3_old:
        # A != B counts down (same convention as encoder 2).
        if A != B:
            encoder3_count -= 1
            vel3_vec.insert(0,-enc_res/(A3_t2 - A3_t1))
        else:
            encoder3_count += 1
            vel3_vec.insert(0,enc_res/(A3_t2 - A3_t1))
        # Keep only the newest num_samples velocity samples.
        if len(vel3_vec) > num_samples:
            vel3_vec.pop()
        vel3 = sum(vel3_vec)/len(vel3_vec)
        A3_old = A
        A3_t1 = A3_t2
io.add_event_detect(en5_pin, io.BOTH, callback=encoder3Callback)
def invskinem(pose=(0, -.1, 0)):
    """Inverse kinematics: Cartesian pose [x, y, z] (m) -> [d1, th2, th3].

    d1 is the prismatic (pulley) joint displacement in radians of pulley
    rotation; th2/th3 are the revolute joint angles on the negative-sine
    (elbow) branch.  Prints a warning and returns None when the target is
    outside the arm's reach.
    """
    # BUG FIX: the default argument was a mutable list (shared between
    # calls); a tuple is indexing-compatible and safe.  The bare ``except``
    # is narrowed to the ValueError that math.sqrt raises for an
    # out-of-reach target.
    d1 = pose[2] / (r_pulley)
    # Law of cosines for the elbow angle.
    c3 = float(((pose[0] - a1)**2 + pose[1]**2 - L1**2 - L2**2) / (2 * L1 * L2))
    try:
        s3 = -math.sqrt(1 - c3**2)  # negative-sine branch
    except ValueError:
        print('Whoops!')
        return
    th3 = math.atan2(s3, c3)
    # Shoulder angle.  Local renamed from ``k`` to avoid shadowing the
    # module-level motor constant of the same name.
    kk = ((pose[0] - a1)**2 + L1**2 + pose[1]**2 - L2**2) / (2 * L1)
    th2 = (math.atan2(math.sqrt((pose[0] - a1)**2 + pose[1]**2 - kk**2), kk)
           + math.atan2(pose[1], pose[0] - a1))
    return [d1, th2, th3]
def keypose(read):
    """Look up the Cartesian target for key *read* and convert mm -> m.

    Returns [x, y, z] in meters, or None (after printing a warning) when
    the key is not in the table.
    """
    if read not in keydic:
        print('Whoops! No keys found!')
        return
    mm = keydic.get(read)
    return [float(mm[0]) / 1000, float(mm[1]) / 1000, float(mm[2]) / 1000]
def control1(pos_d):
    """Drive the robot to the joint-space target pos_d = [d1, th2, th3].

    Moves joint 1 first with full-duty bang-bang control, then runs a PD +
    gravity-compensation loop on joints 2 and 3.  Joint angles and
    velocities are appended to the log file 'data'.
    """
    # BUG FIX: the original ``try:`` had no matching except/finally clause
    # (a SyntaxError).  Cleanup on Ctrl-C mirrors control2(); the log file
    # is also closed when the move completes.
    try:
        tolerance = 0.005
        pos_error1 = 100
        f = open('data', 'a')
        f.write('New Data Theta 1 \n')
        print("Controlling motor 1")
        # ---- motor 1: full-duty bang-bang position control ----
        while abs(pos_error1) >= tolerance:
            pos_error1 = pos_d[0] - countstorad(encoder1_count)
            duty_cycle_1 = 100
            if pos_error1 > 0:
                clockwise(duty_cycle_1, p1, p2, m1_en_pin)
            elif pos_error1 < 0:
                clockwise(100 - duty_cycle_1, p1, p2, m1_en_pin)
            row = str(countstorad(encoder1_count)) + '\t' + str(vel1) + '\n'
            f.write(row)
        p1.stop()
        p2.stop()
        time.sleep(2)
        # ---- motors 2 and 3: PD with gravity compensation ----
        print("Controlling Motors 2 and 3")
        f.write('New Data Theta 2, Theta 3 \n')
        position_error = [100, 100]
        while max(abs(position_error[0]), abs(position_error[1])) > tolerance:
            # Current joint state from the encoder globals.
            pos_current = [countstorad(encoder2_count), countstorad(encoder3_count)]
            angular_velocity = [vel2, vel3]
            row = (str(countstorad(encoder2_count)) + '\t' + str(vel2) + '\t' +
                   str(countstorad(encoder3_count)) + '\t' + str(vel3) + '\n')
            f.write(row)
            # Gravity torque estimate g(q) for joints 2 and 3.
            g_q = [(m_link1*len_link1 + m_motor*L1 + m_link2*L1)*math.cos(pos_current[0]) +
                   m_link2*len_link2*math.cos(pos_current[0] + pos_current[1]),
                   m_link2*len_link2*math.cos(pos_current[0] + pos_current[1])]
            position_error = [pos_d[1] - pos_current[0], pos_d[2] - pos_current[1]]
            # PD control with gravity compensation, torque-limited to +/-0.08.
            u = [g_q[0] + K_p*position_error[0] - K_d*angular_velocity[0],
                 g_q[1] + K_p*position_error[1] - K_d*angular_velocity[1]]
            for i in range(2):
                if u[i] >= 0.08:
                    u[i] = 0.08
                elif u[i] <= -0.08:
                    u[i] = -0.08
            # Torque -> motor voltage -> PWM duty cycle.
            V_d = [R*u[0]/k + k*angular_velocity[0], R*u[1]/k + k*angular_velocity[1]]
            duty = [V_d[0]/V*100, V_d[1]/V*100]
            # Motor 2 duty, clamped/remapped to the driver's usable band.
            if duty[0] > 0:
                if duty[0] >= 100:
                    duty[0] = 100
                elif duty[0] <= 70:
                    duty[0] = 50
                clockwise(duty[0], p3, p4, m2_en_pin)
            else:
                if duty[0] <= -100:
                    duty[0] = 0
                elif duty[0] > -100 and duty[0] <= -70:
                    duty[0] = 100 + duty[0]
                elif duty[0] > -70:
                    duty[0] = 50
                clockwise(duty[0], p3, p4, m2_en_pin)
            # Motor 3 duty, same clamping scheme.
            if duty[1] > 0:
                if duty[1] >= 100:
                    duty[1] = 100
                elif duty[1] <= 70:
                    duty[1] = 50
                clockwise(duty[1], p5, p6, m3_en_pin)
            else:
                if duty[1] <= -100:
                    duty[1] = 0
                elif duty[1] > -100 and duty[1] <= -70:
                    duty[1] = 100 + duty[1]
                elif duty[1] > -70:
                    duty[1] = 50
                clockwise(duty[1], p5, p6, m3_en_pin)
        f.close()
    except KeyboardInterrupt:
        p1.stop()
        p2.stop()
        p3.stop()
        p4.stop()
        p5.stop()
        p6.stop()
        io.cleanup()
def control2(pos_d):
    """Drive to joint target pos_d = [d1, th2, th3]: arm first, then joint 1.

    Mirror image of control1(): runs the PD + gravity-compensation loop on
    joints 2 and 3 before the full-duty move of joint 1 (used when
    returning to a home pose).  Logs angles/velocities to 'data'.
    """
    try:
        tolerance = 0.005
        pos_error1 = 100
        # ---- motors 2 and 3: PD with gravity compensation ----
        print("Controlling Motors 2 and 3")
        position_error = [100, 100]
        # BUG FIX: the original called ``f.open('data','a')`` before ``f``
        # existed; open the log file the same way control1() does.
        f = open('data', 'a')
        f.write('New Data Theta 2 Theta 3 \n')
        while max(abs(position_error[0]), abs(position_error[1])) > tolerance:
            # Current joint state from the encoder globals.
            pos_current = [countstorad(encoder2_count), countstorad(encoder3_count)]
            angular_velocity = [vel2, vel3]
            row = (str(countstorad(encoder2_count)) + '\t' + str(vel2) + '\t' +
                   str(countstorad(encoder3_count)) + '\t' + str(vel3) + '\n')
            f.write(row)
            # Gravity torque estimate g(q) for joints 2 and 3.
            g_q = [(m_link1*len_link1 + m_motor*L1 + m_link2*L1)*math.cos(pos_current[0]) +
                   m_link2*len_link2*math.cos(pos_current[0] + pos_current[1]),
                   m_link2*len_link2*math.cos(pos_current[0] + pos_current[1])]
            position_error = [pos_d[1] - pos_current[0], pos_d[2] - pos_current[1]]
            # PD control with gravity compensation, torque-limited to +/-0.08.
            u = [g_q[0] + K_p*position_error[0] - K_d*angular_velocity[0],
                 g_q[1] + K_p*position_error[1] - K_d*angular_velocity[1]]
            for i in range(2):
                if u[i] >= 0.08:
                    u[i] = 0.08
                elif u[i] <= -0.08:
                    u[i] = -0.08
            # Torque -> motor voltage -> PWM duty cycle.
            V_d = [R*u[0]/k + k*angular_velocity[0], R*u[1]/k + k*angular_velocity[1]]
            duty = [V_d[0]/V*100, V_d[1]/V*100]
            # Motor 2 duty, clamped/remapped to the driver's usable band.
            if duty[0] > 0:
                if duty[0] >= 100:
                    duty[0] = 100
                elif duty[0] <= 70:
                    duty[0] = 50
                clockwise(duty[0], p3, p4, m2_en_pin)
            else:
                if duty[0] <= -100:
                    duty[0] = 0
                elif duty[0] > -100 and duty[0] <= -70:
                    duty[0] = 100 + duty[0]
                elif duty[0] > -70:
                    duty[0] = 50
                clockwise(duty[0], p3, p4, m2_en_pin)
            # Motor 3 duty, same clamping scheme.
            if duty[1] > 0:
                if duty[1] >= 100:
                    duty[1] = 100
                elif duty[1] <= 70:
                    duty[1] = 50
                clockwise(duty[1], p5, p6, m3_en_pin)
            else:
                if duty[1] <= -100:
                    duty[1] = 0
                elif duty[1] > -100 and duty[1] <= -70:
                    duty[1] = 100 + duty[1]
                elif duty[1] > -70:
                    duty[1] = 50
                clockwise(duty[1], p5, p6, m3_en_pin)
        # ---- motor 1: full-duty bang-bang position control ----
        print("Controlling motor 1")
        while abs(pos_error1) >= tolerance:
            # BUG FIX: the original line was missing the '+' between the
            # tab and str(vel1) (a SyntaxError); format matches control1().
            row = str(countstorad(encoder1_count)) + '\t' + str(vel1) + '\n'
            f.write(row)
            pos_error1 = pos_d[0] - countstorad(encoder1_count)
            duty_cycle_1 = 100
            if pos_error1 > 0:
                clockwise(duty_cycle_1, p1, p2, m1_en_pin)
            elif pos_error1 < 0:
                clockwise(100 - duty_cycle_1, p1, p2, m1_en_pin)
        f.close()
        p1.stop()
        p2.stop()
        p3.stop()
        p4.stop()
        p5.stop()
        p6.stop()
    except KeyboardInterrupt:
        p1.stop()
        p2.stop()
        p3.stop()
        p4.stop()
        p5.stop()
        p6.stop()
        io.cleanup()
def correctEncoders(desired):
    """Overwrite the global encoder counts to match a known joint pose.

    Called after a commanded move completes so any accumulated encoder
    drift is discarded; *desired* is the [d1, th2, th3] target in radians.
    """
    global encoder1_count, encoder2_count, encoder3_count
    encoder1_count = radtocount(desired[0])
    encoder2_count = radtocount(desired[1])
    encoder3_count = radtocount(desired[2])
def taskcontrol(command_list):
    """Type each key in *command_list* on the keyboard, homing between keys.

    For every requested key: return the arm to the nearest home pose if it
    is not already there, look up the key's Cartesian pose, convert it to
    joint space, and drive the joints there.  Finishes at the absolute
    home position.
    """
    initializeEncoders()
    for key in command_list:
        # Current joint-space pose reconstructed from the encoder counts.
        current_pos = [
            countstorad(encoder1_count),
            countstorad(encoder2_count),
            countstorad(encoder3_count),
        ]
        # Nearest home keeps theta1 wherever it is (arbitrary); theta2 and
        # theta3 go to their rest angles.
        nearest_home = [current_pos[0], 0, -math.pi / 2]
        print(current_pos)
        if current_pos[1:] != nearest_home[1:]:
            print("Going to nearest home")
            control2(nearest_home)
            correctEncoders(nearest_home)
            print("I'm Home!")
            time.sleep(1)
        # Cartesian pose of the key, then inverse kinematics to joint space.
        cart_pos_d = keypose(key)
        joint_pos_d = invskinem(cart_pos_d)
        print("Move to " + key)
        control1(joint_pos_d)
        correctEncoders(joint_pos_d)
        print("Motion Complete!")
        time.sleep(1)
    # All keys typed: park the arm at the global home position.
    abs_home = [0, 0, -math.pi / 2]
    print("Going to Absolute Home Position")
    control2(abs_home)
    correctEncoders(abs_home)
# This will run when executing the python file, causing the robot to type 'hello'
# NOTE(review): this also runs on *import*, not just direct execution; an
# `if __name__ == "__main__":` guard would avoid moving the robot when the
# module is imported elsewhere.
command_list = ['h','e','l','l','o']
taskcontrol(command_list)
|
normal
|
{
"blob_id": "ce1ef1ce538b8753af9e4b3e8e88f4cde9a2d860",
"index": 9620,
"step-1": "# - *- coding: utf- 8 - *-\r\n\r\nimport RPi.GPIO as io\r\nimport time\r\nimport math\r\n\r\nio.setmode(io.BOARD)\r\n\r\nhz = 50\r\ndt = 1/hz\r\nkr = 48\r\nenc_res = 0.01636246\r\nnum_samples = 100\r\nspecial_words = ['BackSpace', 'Tab', 'Enter', 'Cap', 'Shift2', 'Ctrl1', \r\n\t'WIN1', 'Alt1', 'Alt2', 'WIN2', 'MClick', 'Ctrl2', 'Shift1', '\\\\']\r\nL1=0.115 # m\r\nL2=0.064 # m\r\na1=0.018 # m\r\n\r\n# mm units converted at output of keypose\r\noff = 56 # mm\r\na0=50+off # mm \r\nzz=24 # mm\r\nzs=float (19) #mm\r\nyy=18 #mm\r\nk=float (0.048)\r\nkeydic={'Ctrl1':[0,-(a0+20),12],\r\n\t\t'WIN1':[0,-(a0+20),12+zz],\r\n\t\t'Alt1':[0,-(a0+20),12+2*zz],\r\n\t\t' ':[0,-(a0+20),130],\r\n\t\t'Alt2':[0,-(a0+20),130+12+2*zz],\r\n\t\t'WIN2':[0,-(a0+20),142+3*zz],\r\n\t\t'MClick':[0,-(a0+20),142+4*zz],\r\n\t\t'Ctrl2':[0,-(a0+20),142+5*zz],\r\n\t\t'Shift1':[float (k*yy),-(a0+20+yy),22],\r\n\t\t'z':[float (k*yy),-(a0+20+yy),53],\r\n\t\t'Z':[float (k*yy),-(a0+20+yy),53],\r\n\t\t'x':[float (k*yy),-(a0+20+yy),53+zs],\r\n\t\t'X':[float (k*yy),-(a0+20+yy),53+zs],\r\n\t\t'c':[float (k*yy),-(a0+20+yy),53+2*zs],\r\n\t\t'C':[float (k*yy),-(a0+20+yy),53+2*zs],\r\n\t\t'v':[float (k*yy),-(a0+20+yy),53+3*zs],\r\n\t\t'V':[float (k*yy),-(a0+20+yy),53+3*zs],\r\n\t\t'b':[float (k*yy),-(a0+20+yy),53+4*zs],\r\n\t\t'B':[float (k*yy),-(a0+20+yy),53+4*zs],\r\n\t\t'n':[float (k*yy),-(a0+20+yy),53+5*zs],\r\n\t\t'N':[float (k*yy),-(a0+20+yy),53+5*zs],\r\n\t\t'm':[float (k*yy),-(a0+20+yy),53+6*zs],\r\n\t\t'M':[float (k*yy),-(a0+20+yy),53+6*zs],\r\n\t\t',':[float (k*yy),-(a0+20+yy),53+7*zs],\r\n\t\t'.':[float (k*yy),-(a0+20+yy),53+8*zs],\r\n\t\t'/':[float (k*yy),-(a0+20+yy),53+9*zs],\r\n\t\t'Shift2':[float (k*yy),-(a0+20+yy),22+10*zs+43],\r\n\t\t'Cap':[float (2*k*yy),-(a0+20+2*yy),17],\r\n\t\t'a':[float (2*k*yy),-(a0+20+2*yy),43],\r\n\t\t'A':[float (2*k*yy),-(a0+20+2*yy),43],\r\n\t\t's':[float (2*k*yy),-(a0+20+2*yy),43+zs],\r\n\t\t'S':[float (2*k*yy),-(a0+20+2*yy),43+zs],\r\n\t\t'd':[float 
(2*k*yy),-(a0+20+2*yy),43+2*zs],\r\n\t\t'D':[float (2*k*yy),-(a0+20+2*yy),43+2*zs],\r\n\t\t'f':[float (2*k*yy),-(a0+20+2*yy),43+3*zs],\r\n\t\t'F':[float (2*k*yy),-(a0+20+2*yy),43+3*zs],\r\n\t\t'g':[float (2*k*yy),-(a0+20+2*yy),43+4*zs],\r\n\t\t'G':[float (2*k*yy),-(a0+20+2*yy),43+4*zs],\r\n\t\t'h':[float (2*k*yy),-(a0+20+2*yy),43+5*zs],\r\n\t\t'H':[float (2*k*yy),-(a0+20+2*yy),43+5*zs],\r\n\t\t'j':[float (2*k*yy),-(a0+20+2*yy),43+6*zs],\r\n\t\t'J':[float (2*k*yy),-(a0+20+2*yy),43+6*zs],\r\n\t\t'k':[float (2*k*yy),-(a0+20+2*yy),43+7*zs],\r\n\t\t'K':[float (2*k*yy),-(a0+20+2*yy),43+7*zs],\r\n\t\t'l':[float (2*k*yy),-(a0+20+2*yy),43+8*zs],\r\n\t\t'L':[float (2*k*yy),-(a0+20+2*yy),43+8*zs],\r\n\t\t';':[float (2*k*yy),-(a0+20+2*yy),43+9*zs],\r\n\t\t'\\\\':[float (2*k*yy),-(a0+20+2*yy),43+10*zs],\r\n\t\t'Enter':[float (2*k*yy),-(a0+20+2*yy),33+20+11*zs],\r\n\t\t'Tab':[float (3*k*yy),-(a0+20+3*yy),15],\r\n\t\t'q':[float (3*k*yy),-(a0+20+3*yy),39],\r\n\t\t'Q':[float (3*k*yy),-(a0+20+3*yy),39],\r\n\t\t'w':[float (3*k*yy),-(a0+20+3*yy),zs+39],\r\n\t\t'W':[float (3*k*yy),-(a0+20+3*yy),zs+39],\r\n\t\t'e':[float (3*k*yy),-(a0+20+3*yy),2*zs+39],\r\n\t\t'E':[float (3*k*yy),-(a0+20+3*yy),2*zs+39],\r\n\t\t'r':[float (3*k*yy),-(a0+20+3*yy),3*zs+39],\r\n\t\t'R':[float (3*k*yy),-(a0+20+3*yy),3*zs+39],\r\n\t\t't':[float (3*k*yy),-(a0+20+3*yy),4*zs+39],\r\n\t\t'T':[float (3*k*yy),-(a0+20+3*yy),4*zs+39],\r\n\t\t'y':[float (3*k*yy),-(a0+20+3*yy),5*zs+39],\r\n\t\t'Y':[float (3*k*yy),-(a0+20+3*yy),5*zs+39],\r\n\t\t'u':[float (3*k*yy),-(a0+20+3*yy),6*zs+39],\r\n\t\t'U':[float (3*k*yy),-(a0+20+3*yy),6*zs+39],\r\n\t\t'i':[float (3*k*yy),-(a0+20+3*yy),7*zs+39],\r\n\t\t'I':[float (3*k*yy),-(a0+20+3*yy),7*zs+39],\r\n\t\t'o':[float (3*k*yy),-(a0+20+3*yy),8*zs+39],\r\n\t\t'O':[float (3*k*yy),-(a0+20+3*yy),8*zs+39],\r\n\t\t'p':[float (3*k*yy),-(a0+20+3*yy),9*zs+39],\r\n\t\t'P':[float (3*k*yy),-(a0+20+3*yy),9*zs+39],\r\n\t\t'[':[float (3*k*yy),-(a0+20+3*yy),10*zs+39],\r\n\t\t']':[float 
(3*k*yy),-(a0+20+3*yy),11*zs+39],\r\n\t\t'\\\\':[float (3*k*yy),-(a0+20+3*yy),12*zs+30+14],\r\n\t\t'`':[float (4*k*yy),-(a0+20+4*yy),float (zs/2)],\r\n\t\t'1':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+zs)],\r\n\t\t'2':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+2*zs)],\r\n\t\t'3':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+3*zs)],\r\n\t\t'4':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+4*zs)],\r\n\t\t'5':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+5*zs)],\r\n\t\t'6':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+6*zs)],\r\n\t\t'7':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+7*zs)],\r\n\t\t'8':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+8*zs)],\r\n\t\t'9':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+9*zs)],\r\n\t\t'0':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+10*zs)],\r\n\t\t'-':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+11*zs)],\r\n\t\t'=':[float (4*k*yy),-(a0+20+4*yy),float (zs/2+12*zs)],\r\n\t\t'BackSpace':[float (4*k*yy),-(a0+20+4*yy),13*zs+19]\r\n\t\t}\r\n\t\r\n# set parameters of robot (SI UNITS)\r\nL1,L2=0.115,0.064\r\nlen_link1=0.07\r\nlen_link2=0.04#distances of centers of mass from joint axes\r\nm_link1=0.005\r\nm_link2=0.003\r\nm_motor=0.06\r\nk=0.048\r\nR=3.6\r\nV=5\r\nr_pulley=0.0181102/2 #unit meters\r\nK_p,K_d=0.25,0.125\r\n\r\n# motor 1\r\nm1_in1_pin = 12\r\nm1_in2_pin = 16\r\nm1_en_pin = 18\r\nchan_list = [m1_en_pin, m1_in1_pin, m1_in2_pin]\r\nio.setup(chan_list, io.OUT)\r\np1 = io.PWM(m1_in1_pin, hz)\r\np2 = io.PWM(m1_in2_pin, hz)\r\n\r\n# motor 2\r\nm2_in1_pin = 22\r\nm2_in2_pin = 32\r\nm2_en_pin = 36\r\nchan_list = [m2_en_pin, m2_in1_pin, m2_in2_pin]\r\nio.setup(chan_list, io.OUT)\r\np3 = io.PWM(m2_in1_pin, hz)\r\np4 = io.PWM(m2_in2_pin, hz)\r\n\r\n# motor 3\r\nm3_in1_pin = 38\r\nm3_in2_pin = 40\r\nm3_en_pin = 37\r\nchan_list = [m3_en_pin, m3_in1_pin, m3_in2_pin]\r\nio.setup(chan_list, io.OUT)\r\np5 = io.PWM(m3_in1_pin, hz)\r\np6 = io.PWM(m3_in2_pin, hz)\r\n\r\n# sensor 1\r\nen1_pin = 35\r\nio.setup(en1_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# sensor 2\r\nen2_pin 
= 33\r\nio.setup(en2_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# encoder 1\r\nencoder1_sensors = [en1_pin, en2_pin]\r\nA1_old = 0\r\nencoder1_count = 0\r\nA1_t1 = time.time()\r\nvel1 = 0\r\nvel1_vec = []\r\n\r\n# sensor 3\r\nen3_pin = 31\r\nio.setup(en3_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# sensor 4\r\nen4_pin = 29\r\nio.setup(en4_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# encoder 2\r\nencoder2_sensors = [en3_pin, en4_pin]\r\nA2_old = 0\r\nencoder2_count = 0\r\nA2_t1 = time.time()\r\nvel2 = 0\r\nvel2_vec = []\r\n\r\n# sensor 5\r\nen5_pin = 15\r\nio.setup(en5_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# sensor 6\r\nen6_pin = 13\r\nio.setup(en6_pin, io.IN, pull_up_down=io.PUD_UP)\r\n\r\n# encoder 3\r\nencoder3_sensors = [en5_pin, en6_pin]\r\nA3_old = 0\r\nencoder3_count = 0\r\nA3_t1 = time.time()\r\nvel3 = 0\r\nvel3_vec = []\r\n\r\ndef clockwise(duty, pwm1, pwm2, en_pin):\r\n\tio.output(en_pin, io.HIGH)\r\n\tpwm1.start(duty)\r\n\ttime.sleep(duty/100*dt)\r\n\tpwm2.start(100-duty)\r\n\t\r\ndef counter_clockwise(duty, pwm1, pwm2, en_pin):\r\n\tio.output(en_pin, io.HIGH)\r\n\tpwm2.start(duty)\r\n\ttime.sleep(duty/100*dt)\r\n\tpwm1.start(100-duty)\t\r\n\r\ndef countstorad(count):\r\n\t# returns the joints space angle in radians\r\n\trad = 2*math.pi*count/8/kr\r\n\treturn rad\t\r\n\r\ndef radtocount(rad):\r\n\tcount = rad*kr*8/(2*math.pi)\r\n\treturn count\r\n\t\r\ndef initializeEncoders():\r\n\tglobal encoder2_count, encoder3_count\r\n\tencoder2_count = 0\r\n\tencoder3_count = -math.pi/2\r\n\t\r\ndef resetEncoders():\r\n\tglobal encoder1_count, encoder2_count, encoder3_count\r\n\tencoder1_count = 0\r\n\tencoder2_count = 0\r\n\tencoder3_count = radtocount(-math.pi/2)\r\n\r\ndef encoder1Callback(channel):\r\n\t# this function is called when an encoder reading is detected\r\n\tglobal A1_old, encoder1_count, A1_t1, vel1, vel1_vec\r\n\tA1_t2 = time.time()\r\n\tif io.input(channel):\r\n\t\tA = 1\r\n\telse:\r\n\t\tA = 0\r\n\tif io.input(encoder1_sensors[1]):\r\n\t\tB = 
1\r\n\telse:\r\n\t\tB = 0\r\n\tif A != A1_old:\r\n\t\tif A != B:\r\n\t\t\tencoder1_count += 1\r\n\t\t\tvel1_vec.insert(0,enc_res/(A1_t2 - A1_t1))\r\n\t\telse:\r\n\t\t\tencoder1_count -= 1\r\n\t\t\tvel1_vec.insert(0,-enc_res/(A1_t2 - A1_t1))\r\n\tif len(vel1_vec) > num_samples:\r\n\t\tvel1_vec.pop()\r\n\tvel1 = sum(vel1_vec)/len(vel1_vec)\r\n\tA1_old = A\r\n\tA1_t1 = A1_t2\r\nio.add_event_detect(en1_pin, io.BOTH, callback=encoder1Callback)\r\n\t\r\ndef encoder2Callback(channel):\r\n\t# this function is called when an encoder reading is detected\r\n\tglobal A2_old, encoder2_count, A2_t1, vel2, vel2_vec\r\n\tA2_t2 = time.time()\r\n\tif io.input(channel):\r\n\t\tA = 1\r\n\telse:\r\n\t\tA = 0\r\n\tif io.input(encoder2_sensors[1]):\r\n\t\tB = 1\r\n\telse:\r\n\t\tB = 0\r\n\tif A != A2_old:\r\n\t\tif A != B:\r\n\t\t\tencoder2_count -= 1\r\n\t\t\tvel2_vec.insert(0,-enc_res/(A2_t2 - A2_t1))\r\n\t\telse:\r\n\t\t\tencoder2_count += 1\r\n\t\t\tvel2_vec.insert(0,enc_res/(A2_t2 - A2_t1))\r\n\tif len(vel2_vec) > num_samples:\r\n\t\tvel2_vec.pop()\r\n\tvel2 = sum(vel2_vec)/len(vel2_vec)\r\n\tA2_old = A\r\n\tA2_t1 = A2_t2\r\nio.add_event_detect(en3_pin, io.BOTH, callback=encoder2Callback)\r\n\r\ndef encoder3Callback(channel):\r\n\t# this function is called when an encoder reading is detected\r\n\tglobal A3_old, encoder3_count, A3_t1, vel3, vel3_vec\r\n\tA3_t2 = time.time()\r\n\tif io.input(channel):\r\n\t\tA = 1\r\n\telse:\r\n\t\tA = 0\r\n\tif io.input(encoder3_sensors[1]):\r\n\t\tB = 1\r\n\telse:\r\n\t\tB = 0\r\n\tif A != A3_old:\r\n\t\tif A != B:\r\n\t\t\tencoder3_count -= 1\r\n\t\t\tvel3_vec.insert(0,-enc_res/(A3_t2 - A3_t1))\r\n\t\telse:\r\n\t\t\tencoder3_count += 1\r\n\t\t\tvel3_vec.insert(0,enc_res/(A3_t2 - A3_t1))\r\n\tif len(vel3_vec) > num_samples:\r\n\t\tvel3_vec.pop()\r\n\tvel3 = sum(vel3_vec)/len(vel3_vec)\r\n\tA3_old = A\r\n\tA3_t1 = A3_t2\r\nio.add_event_detect(en5_pin, io.BOTH, callback=encoder3Callback)\r\n\r\ndef invskinem(pose=[0,-.1,0]):\r\n 
\td1=pose[2]/(r_pulley)\r\n\tc3=float(((pose[0]-a1)**2+pose[1]**2-L1**2-L2**2)/(2*L1*L2))\r\n\ttry:\r\n\t\ts3=-math.sqrt(1-c3**2)\r\n\texcept:\r\n\t\tprint('Whoops!')\r\n\t\treturn\r\n\tth3=math.atan2(s3,c3)\r\n\tk=((pose[0]-a1)**2+L1**2+pose[1]**2-L2**2)/(2*L1)\r\n\tth2=math.atan2(math.sqrt((pose[0]-a1)**2+pose[1]**2-k**2),k)+math.atan2(pose[1],pose[0]-a1)\r\n\treturn [d1,th2,th3]\r\n\t \r\ndef keypose(read):\r\n\tif read in keydic:\r\n\t\toutput=keydic.get(read)\r\n\t\toutput = [float(output[0])/1000, float(output[1])/1000, float(output[2])/1000]\r\n\t\treturn output\r\n\telse:\r\n\t\tprint('Whoops! No keys found!')\r\n\treturn\r\n\t\r\ndef control1(pos_d):\r\n\ttry:\r\n\t\t# initialize the encoders\r\n\t\t##################################################\r\n\t\t#This is for motor1 control\r\n\t\t##################################################\r\n\t\ttolerance=0.005\r\n\t\tpos_error1=100\r\n\t\tf = open('data','a')\r\n\t\tf.write('New Data Theta 1 \\n')\r\n\t\tprint(\"Controlling motor 1\")\r\n\t\twhile abs(pos_error1) >=tolerance:\r\n\t\t\tpos_error1=pos_d[0]-countstorad(encoder1_count)\r\n\t\t\tduty_cycle_1=100\r\n\t\t\tif pos_error1>0:\r\n\t\t\t\tclockwise(duty_cycle_1, p1, p2, m1_en_pin)\r\n\t\t\telif pos_error1<0:\r\n\t\t\t\tclockwise(100-duty_cycle_1,p1,p2,m1_en_pin)\r\n\t\t\trow = str(countstorad(encoder1_count))+'\\t'+str(vel1)+'\\n'\r\n\t\t\tf.write(row)\r\n\t\tp1.stop()\r\n\t\tp2.stop()\r\n\t\ttime.sleep(2)\r\n\t\t##################################################\r\n\t\t#This is for motor2 and motor3 control\r\n\t\t##################################################\r\n\t\tprint(\"Controlling Motors 2 and 3\")\r\n\t\tf.write('New Data Theta 2, Theta 3 \\n')\r\n\t\tposition_error=[100,100]\r\n\t\twhile max(abs(position_error[0]),abs(position_error[1])) > tolerance:\r\n\t\t\t# get current position\r\n\t\t\tpos_current=[countstorad(encoder2_count),countstorad(encoder3_count)]\r\n\t\t\tangular_velocity=[vel2,vel3]\r\n\t\t\trow = 
str(countstorad(encoder2_count))+'\\t'+str(vel2)+'\\t'+str(countstorad(encoder3_count))+'\\t'+str(vel3)+'\\n'\r\n\t\t\tf.write(row)\r\n\t\t\t# estimate g(q)\r\n\t\t\tg_q=[(m_link1*len_link1+m_motor*L1+m_link2*L1)*math.cos(pos_current[0])+\\\r\n\t\t\tm_link2*len_link2*math.cos(pos_current[0]+pos_current[1]),\\\r\n\t\t\tm_link2*len_link2*math.cos(pos_current[0]+pos_current[1])]\r\n\t\t\t# calculate position error\r\n\t\t\tposition_error=[pos_d[1]-pos_current[0],pos_d[2]-pos_current[1]]\r\n\t\t\t# u = PD control with gravity compensation\r\n\t\t\tu=[g_q[0]+K_p*position_error[0]-K_d*angular_velocity[0],\\\r\n\t\t\tg_q[1]+K_p*position_error[1]-K_d*angular_velocity[1]]\r\n\t\t\tfor i in range(2):\r\n\t\t\t\tif u[i]>=0.08:\r\n\t\t\t\t\tu[i]=0.08\r\n\t\t\t\telif u[i]<=-0.08:\r\n\t\t\t\t\tu[i]=-0.08\r\n\t\t\t\r\n\t\t\t# duty = function(u)\r\n\t\t\tV_d=[R*u[0]/k+k*angular_velocity[0],R*u[1]/k+k*angular_velocity[1]]\r\n\t\t\tduty=[V_d[0]/V*100,V_d[1]/V*100]\r\n\t\t\t# move the motors according to duty\r\n\t\t\t#motor1 duty cycle ##############################\r\n\t\t\tif duty[0]>0:\r\n\t\t\t\tif duty[0]>=100:\r\n\t\t\t\t\tduty[0]=100\r\n\t\t\t\telif duty[0]<=70:\r\n\t\t\t\t\tduty[0]=50\r\n\t\t\t\tclockwise(duty[0], p3, p4, m2_en_pin)\r\n\t\t\telse:\r\n\t\t\t\tif duty[0]<=-100:\r\n\t\t\t\t\tduty[0]=0\r\n\t\t\t\telif duty[0] > -100 and duty[0] <= -70:\r\n\t\t\t\t\tduty[0]=100+duty[0]\r\n\t\t\t\telif duty[0]>-70:\r\n\t\t\t\t\tduty[0]=50\r\n\t\t\t\tclockwise(duty[0],p3,p4,m2_en_pin)\r\n\t\t\t###################################################\r\n\t\t\t#motor2 duty cycle ################################\r\n\t\t\tif duty[1]>0:\r\n\t\t\t\tif duty[1]>=100:\r\n\t\t\t\t\tduty[1]=100\r\n\t\t\t\telif duty[1]<=70:\r\n\t\t\t\t\tduty[1]=50\r\n\t\t\t\tclockwise(duty[1], p5, p6, m3_en_pin)\r\n\t\t\telse:\r\n\t\t\t\tif duty[1]<=-100:\r\n\t\t\t\t\tduty[1]=0\r\n\t\t\t\telif duty[1] > -100 and duty[1] <= -70:\r\n\t\t\t\t\tduty[1]=100+duty[1]\r\n\t\t\t\telif 
duty[1]>-70:\r\n\t\t\t\t\tduty[1]=50\r\n\t\t\t\tclockwise(duty[1],p5,p6,m3_en_pin)\r\n\t\t\t####################################################\r\n\r\ndef control2(pos_d):\r\n try:\r\n\t\t# initialize the encoders\r\n\t\ttolerance=0.005\r\n\t\tpos_error1=100\r\n\t\t##################################################\r\n\t\t#This is for motor2 and motor3 control\r\n\t\t##################################################\r\n\t\tprint(\"Controlling Motors 2 and 3\")\r\n\t\tposition_error=[100,100]\r\n\t\tf.open('data','a')\r\n\t\tf.write('New Data Theta 2 Theta 3 \\n')\r\n\t\twhile max(abs(position_error[0]),abs(position_error[1])) > tolerance:\r\n\t\t\t# get current position\r\n\t\t\tpos_current=[countstorad(encoder2_count),countstorad(encoder3_count)]\r\n\t\t\tangular_velocity=[vel2,vel3]\r\n\t\t\trow = str(countstorad(encoder2_count))+'\\t'+str(vel2)+'\\t'+str(countstorad(encoder3_count))+'\\t'+str(vel3)+'\\n'\r\n\t\t\tf.write(row)\r\n\t\t\t# estimate g(q)\r\n\t\t\tg_q=[(m_link1*len_link1+m_motor*L1+m_link2*L1)*math.cos(pos_current[0])+\\\r\n\t\t\tm_link2*len_link2*math.cos(pos_current[0]+pos_current[1]),\\\r\n\t\t\tm_link2*len_link2*math.cos(pos_current[0]+pos_current[1])]\r\n\t\t\t# calculate position error\r\n\t\t\tposition_error=[pos_d[1]-pos_current[0],pos_d[2]-pos_current[1]]\r\n\t\t\t# u = PD control with gravity compensation\r\n\t\t\tu=[g_q[0]+K_p*position_error[0]-K_d*angular_velocity[0],\\\r\n\t\t\tg_q[1]+K_p*position_error[1]-K_d*angular_velocity[1]]\r\n\t\t\tfor i in range(2):\r\n\t\t\t\tif u[i]>=0.08:\r\n\t\t\t\t\tu[i]=0.08\r\n\t\t\t\telif u[i]<=-0.08:\r\n\t\t\t\t\tu[i]=-0.08\r\n\t\t\t\r\n\t\t\t# duty = function(u)\r\n\t\t\tV_d=[R*u[0]/k+k*angular_velocity[0],R*u[1]/k+k*angular_velocity[1]]\r\n\t\t\tduty=[V_d[0]/V*100,V_d[1]/V*100]\r\n\t\t\t# move the motors according to duty\r\n\t\t\t#motor1 duty cycle ##############################\r\n\t\t\tif duty[0]>0:\r\n\t\t\t\tif duty[0]>=100:\r\n\t\t\t\t\tduty[0]=100\r\n\t\t\t\telif 
duty[0]<=70:\r\n\t\t\t\t\tduty[0]=50\r\n\t\t\t\tclockwise(duty[0], p3, p4, m2_en_pin)\r\n\t\t\telse:\r\n\t\t\t\tif duty[0]<=-100:\r\n\t\t\t\t\tduty[0]=0\r\n\t\t\t\telif duty[0] > -100 and duty[0] <= -70:\r\n\t\t\t\t\tduty[0]=100+duty[0]\r\n\t\t\t\telif duty[0]>-70:\r\n\t\t\t\t\tduty[0]=50\r\n\t\t\t\tclockwise(duty[0],p3,p4,m2_en_pin)\r\n\t\t\t###################################################\r\n\t\t\t#motor2 duty cycle ################################\r\n\t\t\tif duty[1]>0:\r\n\t\t\t\tif duty[1]>=100:\r\n\t\t\t\t\tduty[1]=100\r\n\t\t\t\telif duty[1]<=70:\r\n\t\t\t\t\tduty[1]=50\r\n\t\t\t\tclockwise(duty[1], p5, p6, m3_en_pin)\r\n\t\t\telse:\r\n\t\t\t\tif duty[1]<=-100:\r\n\t\t\t\t\tduty[1]=0\r\n\t\t\t\telif duty[1] > -100 and duty[1] <= -70:\r\n\t\t\t\t\tduty[1]=100+duty[1]\r\n\t\t\t\telif duty[1]>-70:\r\n\t\t\t\t\tduty[1]=50\r\n\t\t\t\tclockwise(duty[1],p5,p6,m3_en_pin)\r\n\t\t\t####################################################\r\n\t\t##################################################\r\n\t\t#This is for motor1 control\r\n\t\t##################################################\r\n\t\tprint(\"Controlling motor 1\")\r\n\t\twhile abs(pos_error1) >=tolerance:\r\n\t\t\trow = str(countstorad(encoder1_count))+'\\t'str(vel1)\r\n\t\t\tf.write(row)\r\n\t\t\tpos_error1=pos_d[0]-countstorad(encoder1_count)\r\n\t\t\tduty_cycle_1=100\r\n\t\t\tif pos_error1>0:\r\n\t\t\t\tclockwise(duty_cycle_1, p1, p2, m1_en_pin)\r\n\t\t\telif pos_error1<0:\r\n\t\t\t\tclockwise(100-duty_cycle_1,p1,p2,m1_en_pin)\r\n\t\tp1.stop()\r\n\t\tp2.stop()\r\n\t\tp3.stop()\r\n\t\tp4.stop()\r\n\t\tp5.stop()\r\n\t\tp6.stop()\r\n\texcept KeyboardInterrupt:\r\n\t\tp1.stop()\r\n\t\tp2.stop()\r\n\t\tp3.stop()\r\n\t\tp4.stop()\r\n\t\tp5.stop()\r\n\t\tp6.stop()\r\n\t\tio.cleanup()\r\n\t\t\r\ndef correctEncoders(desired):\r\n\tglobal encoder1_count, encoder2_count, encoder3_count\r\n\tpos_correct = [radtocount(desired[0]), radtocount(desired[1]), radtocount(desired[2])]\r\n\tencoder1_count = 
pos_correct[0]\r\n\tencoder2_count = pos_correct[1]\r\n\tencoder3_count = pos_correct[2]\r\n\r\ndef taskcontrol(command_list):\r\n initializeEncoders()\r\n\tn = len(command_list)\r\n\tfor i in xrange(0,n):\r\n\t# for each key in string_desired\r\n\t\t# get position\r\n\t\tcurrent_pos = [countstorad(encoder1_count), countstorad(encoder2_count), countstorad(encoder3_count)]\r\n\t\t# get nearest home position (theta 1 is arbitrary)\r\n\t\tnearest_home = [current_pos[0], 0, -math.pi/2]\r\n\t\tprint(current_pos)\r\n\t\tif (current_pos[1] != nearest_home[1]) or (current_pos[2] != nearest_home[2]):\r\n\t\t\tprint(\"Going to nearest home\")\r\n\t\t\tcontrol2(nearest_home)\r\n\t\t\tcorrectEncoders(nearest_home)\r\n\t\t\tprint(\"I'm Home!\")\r\n\t\t\ttime.sleep(1)\r\n\t\tcart_pos_d = keypose(command_list[i])\r\n\t\t# getpose(key_desired)\r\n\t\tjoint_pos_d = invskinem(cart_pos_d)\r\n\t\t# inverse kinematics to find joint space position\r\n\t\tprint(\"Move to \"+command_list[i])\r\n\t\tcontrol1(joint_pos_d)\r\n\t\tcorrectEncoders(joint_pos_d)\r\n\t\tprint(\"Motion Complete!\")\r\n\t\ttime.sleep(1)\r\n\t\t# control(pose_desired)\r\n\t# end for loop\r\n\tabs_home = [0, 0, -math.pi/2]\r\n\t# return to global home position\r\n\tprint(\"Going to Absolute Home Position\")\r\n\tcontrol2(abs_home)\r\n\tcorrectEncoders(abs_home)\r\n\t\r\n# This will run when executing the python file, causing the robot to type 'hello'\r\ncommand_list = ['h','e','l','l','o']\r\ntaskcontrol(command_list)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from os import walk
from ccal import VERSION
from setuptools import setup
# Collect every file under data/ (recursively) so it ships with the package.
package_data = [
    "{}/{}".format(directory_path, file_name)
    for directory_path, _, file_names in walk("data")
    for file_name in file_names
]

setup(
    name="ccal",
    version=VERSION,
    description="Computational Cancer Analysis Library",
    url="https://github.com/KwatME/ccal",
    author="Kwat Medetgul-Ernar (Huwate Yeerna)",
    author_email="kwatme8@gmail.com",
    # The license keyword names the license itself; it previously held the
    # placeholder string "LICENSE" (a file name).  The classifier below
    # already declares MIT.
    license="MIT",
    classifiers=(
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering :: Bio-Informatics",
    ),
    python_requires=">=3.6",
    install_requires=(),
    include_package_data=True,
    package_data={"ccal": package_data},
)
|
normal
|
{
"blob_id": "11d0e84767f7e9e4687962a3a5c58dc882cc4dd2",
"index": 1934,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor directory_path, directory_names, file_names in walk('data'):\n for file_name in file_names:\n package_data.append('{}/{}'.format(directory_path, file_name))\nsetup(name='ccal', version=VERSION, description=\n 'Computational Cancer Analysis Library', url=\n 'https://github.com/KwatME/ccal', author=\n 'Kwat Medetgul-Ernar (Huwate Yeerna)', author_email='kwatme8@gmail.com',\n license='LICENSE', classifiers=('Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering :: Bio-Informatics'), python_requires=\n '>=3.6', install_requires=(), include_package_data=True, package_data={\n 'ccal': package_data})\n",
"step-3": "<mask token>\npackage_data = []\nfor directory_path, directory_names, file_names in walk('data'):\n for file_name in file_names:\n package_data.append('{}/{}'.format(directory_path, file_name))\nsetup(name='ccal', version=VERSION, description=\n 'Computational Cancer Analysis Library', url=\n 'https://github.com/KwatME/ccal', author=\n 'Kwat Medetgul-Ernar (Huwate Yeerna)', author_email='kwatme8@gmail.com',\n license='LICENSE', classifiers=('Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering :: Bio-Informatics'), python_requires=\n '>=3.6', install_requires=(), include_package_data=True, package_data={\n 'ccal': package_data})\n",
"step-4": "from os import walk\nfrom ccal import VERSION\nfrom setuptools import setup\npackage_data = []\nfor directory_path, directory_names, file_names in walk('data'):\n for file_name in file_names:\n package_data.append('{}/{}'.format(directory_path, file_name))\nsetup(name='ccal', version=VERSION, description=\n 'Computational Cancer Analysis Library', url=\n 'https://github.com/KwatME/ccal', author=\n 'Kwat Medetgul-Ernar (Huwate Yeerna)', author_email='kwatme8@gmail.com',\n license='LICENSE', classifiers=('Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering :: Bio-Informatics'), python_requires=\n '>=3.6', install_requires=(), include_package_data=True, package_data={\n 'ccal': package_data})\n",
"step-5": "from os import walk\n\nfrom ccal import VERSION\nfrom setuptools import setup\n\npackage_data = []\n\nfor directory_path, directory_names, file_names in walk(\"data\"):\n\n for file_name in file_names:\n\n package_data.append(\"{}/{}\".format(directory_path, file_name))\n\nsetup(\n name=\"ccal\",\n version=VERSION,\n description=\"Computational Cancer Analysis Library\",\n url=\"https://github.com/KwatME/ccal\",\n author=\"Kwat Medetgul-Ernar (Huwate Yeerna)\",\n author_email=\"kwatme8@gmail.com\",\n license=\"LICENSE\",\n classifiers=(\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Natural Language :: English\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n ),\n python_requires=\">=3.6\",\n install_requires=(),\n include_package_data=True,\n package_data={\"ccal\": package_data},\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Admin module for Django."""
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django_q.conf import Conf, croniter
from django_q.models import Failure, OrmQ, Schedule, Success
from django_q.tasks import async_task
class TaskAdmin(admin.ModelAdmin):
    """model admin for success tasks."""

    list_display = ("name", "func", "started", "stopped", "time_taken", "group")

    def has_add_permission(self, request):
        """Don't allow adds."""
        return False

    def get_queryset(self, request):
        """Only show successes."""
        qs = super().get_queryset(request)
        return qs.filter(success=True)

    search_fields = ("name", "func", "group")
    readonly_fields = []
    list_filter = ("group",)

    def get_readonly_fields(self, request, obj=None):
        """Set all fields readonly."""
        if obj is None:
            # Add view is disabled, but guard anyway: no object means no
            # model fields to lock beyond the declared ones.
            return list(self.readonly_fields)
        return list(self.readonly_fields) + [field.name for field in obj._meta.fields]
def retry_failed(FailAdmin, request, queryset):
    """Submit selected tasks back to the queue."""
    for task in queryset:
        # Re-enqueue with the original callable, arguments and hook, then
        # remove the failure record.
        args = task.args or ()
        kwargs = task.kwargs or {}
        async_task(task.func, *args, hook=task.hook, **kwargs)
        task.delete()


retry_failed.short_description = _("Resubmit selected tasks to queue")
class FailAdmin(admin.ModelAdmin):
    """model admin for failed tasks."""

    list_display = ("name", "func", "started", "stopped", "short_result")

    def has_add_permission(self, request):
        """Don't allow adds."""
        return False

    actions = [retry_failed]
    search_fields = ("name", "func")
    list_filter = ("group",)
    readonly_fields = []

    def get_readonly_fields(self, request, obj=None):
        """Set all fields readonly."""
        if obj is None:
            # Add view is disabled, but guard anyway: no object means no
            # model fields to lock beyond the declared ones.
            return list(self.readonly_fields)
        return list(self.readonly_fields) + [field.name for field in obj._meta.fields]
class ScheduleAdmin(admin.ModelAdmin):
    """model admin for schedules"""

    list_display = (
        "id", "name", "func", "schedule_type", "repeats",
        "cluster", "next_run", "last_run", "success",
    )
    list_display_links = ("id", "name")
    list_filter = ("next_run", "schedule_type", "cluster")
    search_fields = ("func",)

    # Cron-type schedules can only be edited when croniter is installed;
    # otherwise the cron expression is shown read-only.
    if not croniter:
        readonly_fields = ("cron",)
class QueueAdmin(admin.ModelAdmin):
    """queue admin for ORM broker"""

    list_display = ("id", "key", "task_id", "name", "func", "lock")
    list_filter = ("key",)

    def has_add_permission(self, request):
        """Don't allow adds."""
        return False

    def get_queryset(self, request):
        # Queue rows live on the broker's configured database alias.
        return super().get_queryset(request).using(Conf.ORM)

    def save_model(self, request, obj, form, change):
        # Persist to the broker database, not the default alias.
        obj.save(using=Conf.ORM)

    def delete_model(self, request, obj):
        # Delete from the broker database, not the default alias.
        obj.delete(using=Conf.ORM)
admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(Success, TaskAdmin)
admin.site.register(Failure, FailAdmin)

# The ORM queue model is only meaningful when the ORM broker (or the test
# configuration) is active.
if Conf.ORM or Conf.TESTING:
    admin.site.register(OrmQ, QueueAdmin)
|
normal
|
{
"blob_id": "5aebebb7f22e094a1a897b3266ff07d59400b76c",
"index": 2209,
"step-1": "<mask token>\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, 
self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *(task.args or ()), hook=task.hook, **task.\n kwargs or {})\n task.delete()\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def 
save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'time_taken', 'group'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n search_fields = 'name', 'func', 'group'\n readonly_fields = []\n list_filter = 'group',\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *(task.args or ()), hook=task.hook, **task.\n kwargs or {})\n task.delete()\n\n\n<mask token>\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n list_display = 'name', 'func', 'started', 'stopped', 'short_result'\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n actions = [retry_failed]\n search_fields = 'name', 'func'\n list_filter = 'group',\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj.\n _meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n list_display = ('id', 'name', 'func', 'schedule_type', 'repeats',\n 'cluster', 'next_run', 'last_run', 'success')\n if not croniter:\n readonly_fields = 'cron',\n list_filter = 'next_run', 'schedule_type', 'cluster'\n search_fields = 'func',\n list_display_links = 'id', 'name'\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n list_display = 'id', 'key', 'task_id', 'name', 'func', 'lock'\n\n def 
save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n list_filter = 'key',\n\n\nadmin.site.register(Schedule, ScheduleAdmin)\nadmin.site.register(Success, TaskAdmin)\nadmin.site.register(Failure, FailAdmin)\nif Conf.ORM or Conf.TESTING:\n admin.site.register(OrmQ, QueueAdmin)\n",
"step-5": "\"\"\"Admin module for Django.\"\"\"\nfrom django.contrib import admin\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django_q.conf import Conf, croniter\nfrom django_q.models import Failure, OrmQ, Schedule, Success\nfrom django_q.tasks import async_task\n\n\nclass TaskAdmin(admin.ModelAdmin):\n \"\"\"model admin for success tasks.\"\"\"\n\n list_display = (\"name\", \"func\", \"started\", \"stopped\", \"time_taken\", \"group\")\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n def get_queryset(self, request):\n \"\"\"Only show successes.\"\"\"\n qs = super(TaskAdmin, self).get_queryset(request)\n return qs.filter(success=True)\n\n search_fields = (\"name\", \"func\", \"group\")\n readonly_fields = []\n list_filter = (\"group\",)\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]\n\n\ndef retry_failed(FailAdmin, request, queryset):\n \"\"\"Submit selected tasks back to the queue.\"\"\"\n for task in queryset:\n async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})\n task.delete()\n\n\nretry_failed.short_description = _(\"Resubmit selected tasks to queue\")\n\n\nclass FailAdmin(admin.ModelAdmin):\n \"\"\"model admin for failed tasks.\"\"\"\n\n list_display = (\"name\", \"func\", \"started\", \"stopped\", \"short_result\")\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n actions = [retry_failed]\n search_fields = (\"name\", \"func\")\n list_filter = (\"group\",)\n readonly_fields = []\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Set all fields readonly.\"\"\"\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]\n\n\nclass ScheduleAdmin(admin.ModelAdmin):\n \"\"\"model admin for schedules\"\"\"\n\n list_display = (\n \"id\",\n \"name\",\n \"func\",\n 
\"schedule_type\",\n \"repeats\",\n \"cluster\",\n \"next_run\",\n \"last_run\",\n \"success\",\n )\n\n # optional cron strings\n if not croniter:\n readonly_fields = (\"cron\",)\n\n list_filter = (\"next_run\", \"schedule_type\", \"cluster\")\n search_fields = (\"func\",)\n list_display_links = (\"id\", \"name\")\n\n\nclass QueueAdmin(admin.ModelAdmin):\n \"\"\"queue admin for ORM broker\"\"\"\n\n list_display = (\"id\", \"key\", \"task_id\", \"name\", \"func\", \"lock\")\n\n def save_model(self, request, obj, form, change):\n obj.save(using=Conf.ORM)\n\n def delete_model(self, request, obj):\n obj.delete(using=Conf.ORM)\n\n def get_queryset(self, request):\n return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM)\n\n def has_add_permission(self, request):\n \"\"\"Don't allow adds.\"\"\"\n return False\n\n list_filter = (\"key\",)\n\n\nadmin.site.register(Schedule, ScheduleAdmin)\nadmin.site.register(Success, TaskAdmin)\nadmin.site.register(Failure, FailAdmin)\n\nif Conf.ORM or Conf.TESTING:\n admin.site.register(OrmQ, QueueAdmin)\n",
"step-ids": [
10,
21,
22,
23,
26
]
}
|
[
10,
21,
22,
23,
26
] |
import itertools
def zbits(n, k):
    """Return all length-n binary strings containing exactly k zeros.

    Chooses the k zero positions with itertools.combinations — C(n, k)
    candidates — instead of deduplicating the n! permutations of the
    multiset, which is exponentially slower for the same result set.
    """
    strings = set()
    for zero_positions in itertools.combinations(range(n), k):
        bits = ["1"] * n
        for pos in zero_positions:
            bits[pos] = "0"
        strings.add("".join(bits))
    return strings


assert zbits(4, 3) == {'0100', '0001', '0010', '1000'}
assert zbits(4, 1) == {'0111', '1011', '1101', '1110'}
assert zbits(5, 4) == {'00001', '00100', '01000', '10000', '00010'}
|
normal
|
{
"blob_id": "a8d13c3fbf6051eba392bcdd6dcb3e946696585f",
"index": 9065,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef zbits(n, k):\n zeros = '0' * k\n ones = '1' * (n - k)\n binary = ones + zeros\n string = {''.join(i) for i in itertools.permutations(binary, n)}\n return string\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef zbits(n, k):\n zeros = '0' * k\n ones = '1' * (n - k)\n binary = ones + zeros\n string = {''.join(i) for i in itertools.permutations(binary, n)}\n return string\n\n\nassert zbits(4, 3) == {'0100', '0001', '0010', '1000'}\nassert zbits(4, 1) == {'0111', '1011', '1101', '1110'}\nassert zbits(5, 4) == {'00001', '00100', '01000', '10000', '00010'}\n",
"step-4": "import itertools\n\n\ndef zbits(n, k):\n zeros = '0' * k\n ones = '1' * (n - k)\n binary = ones + zeros\n string = {''.join(i) for i in itertools.permutations(binary, n)}\n return string\n\n\nassert zbits(4, 3) == {'0100', '0001', '0010', '1000'}\nassert zbits(4, 1) == {'0111', '1011', '1101', '1110'}\nassert zbits(5, 4) == {'00001', '00100', '01000', '10000', '00010'}\n",
"step-5": "import itertools \n\ndef zbits(n,k):\n zeros = \"0\" * k\n ones = \"1\" * (n-k)\n binary = ones+zeros\n string = {''.join(i) for i in itertools.permutations(binary, n)}\n return(string)\n\n\nassert zbits(4, 3) == {'0100', '0001', '0010', '1000'}\nassert zbits(4, 1) == {'0111', '1011', '1101', '1110'}\nassert zbits(5, 4) == {'00001', '00100', '01000', '10000', '00010'}",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for b in range(1, w + 1):
print('*', end='')
print('')
for i in range(1, h - 1):
print('*', end='')
for j in range(1, w - 1):
print(' ', end='')
print('*', end='')
print('')
for b in range(1, w + 1):
print('*', end='')
print('')
<|reserved_special_token_1|>
# Draw a hollow rectangle of '*' sized from user input.
w = int(input('Width ?'))
h = int(input('Height ?'))

# Top border.
print('*' * w)
# Interior rows: one star each side, spaces between. For w < 2 the
# negative-length space repeat collapses to '', matching the original
# character-by-character loops.
for _ in range(1, h - 1):
    print('*' + ' ' * (w - 2) + '*')
# Bottom border.
print('*' * w)
<|reserved_special_token_1|>
# Print a hollow rectangle of asterisks; dimensions come from the user.
w = int(input("Width ?"))
h = int(input("Height ?"))

print("*" * w)  # top edge
for _ in range(h - 2):  # interior rows (same count as range(1, h - 1))
    print("*" + " " * (w - 2) + "*")
print("*" * w)  # bottom edge
|
flexible
|
{
"blob_id": "32b961f3971819fdbbe1a30fd7cf1883353c1854",
"index": 2294,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n",
"step-3": "w = int(input('Width ?'))\nh = int(input('Height ?'))\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\nfor i in range(1, h - 1):\n print('*', end='')\n for j in range(1, w - 1):\n print(' ', end='')\n print('*', end='')\n print('')\nfor b in range(1, w + 1):\n print('*', end='')\nprint('')\n",
"step-4": "w = int(input(\"Width ?\"))\nh= int(input(\"Height ?\"))\n\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")\n\n\nfor i in range(1,h-1):\n\tprint (\"*\", end='')\n\tfor j in range(1,w-1):\n\t\tprint (\" \", end='')\n\tprint (\"*\", end='')\n\tprint(\"\")\n\nfor b in range(1,w+1):\n\tprint (\"*\", end='')\nprint(\"\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(0, len(a)):
if a[i] < 5:
print(str(a[i]) + ' ')
i += 1
else:
i += 1
<|reserved_special_token_1|>
a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]

# Print each value below 5, followed by a trailing space (one per line).
# The original indexed via range(len(a)) and incremented the loop index
# manually, which has no effect in a range-based for loop; iterate the
# values directly instead.
for value in a:
    if value < 5:
        print(str(value) + ' ')
<|reserved_special_token_1|>
a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]

# Emit the values smaller than 5. Each print supplies its own newline, so
# the trailing space reproduces the original output exactly; the manual
# index bookkeeping (i += 1 in both branches) was a no-op and is dropped.
for value in a:
    if value < 5:
        print(str(value) + " ")
|
flexible
|
{
"blob_id": "24635989ccdb0f35f1e618dd8dc07f2cf84faddb",
"index": 6621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + ' ')\n i += 1\n else:\n i += 1\n",
"step-3": "a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + ' ')\n i += 1\n else:\n i += 1\n",
"step-4": "a = [1, 1, 2, 3, 4, 4, 5, 7, 12, 30, 49]\n\nfor i in range(0, len(a)):\n if a[i] < 5:\n print(str(a[i]) + \" \")\n i += 1\n else:\n i += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TCP:
<|reserved_special_token_0|>
def connect(self, host, port):
server_address = host, port
print('connecting to {} port {}'.format(*server_address))
self.sock.connect(server_address)
def send(self, value, convergence=False):
"""Send one value (distortion gain) to the server"""
data = json.dumps(dict({'gain': value, 'convergence': convergence})
).encode()
print('Sending value {} as data {}'.format(value, data))
self.sock.sendall(data)
<|reserved_special_token_0|>
def receive(self):
data = self.sock.recv(1024)
print('Received: {}'.format(data))
value = json.loads(data)
return value
def close(self):
print('Closing socket')
self.sock.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TCP:
<|reserved_special_token_0|>
def connect(self, host, port):
server_address = host, port
print('connecting to {} port {}'.format(*server_address))
self.sock.connect(server_address)
def send(self, value, convergence=False):
"""Send one value (distortion gain) to the server"""
data = json.dumps(dict({'gain': value, 'convergence': convergence})
).encode()
print('Sending value {} as data {}'.format(value, data))
self.sock.sendall(data)
def send2(self, radius, gain, convergence=False):
"""Send two values (distortion gain, and radius) to the server"""
data = json.dumps(dict({'gain': gain, 'radius': radius,
'convergence': convergence})).encode()
print('Sending value ({}, {}) as data {}'.format(radius, gain, data))
self.sock.sendall(data)
def receive(self):
data = self.sock.recv(1024)
print('Received: {}'.format(data))
value = json.loads(data)
return value
def close(self):
print('Closing socket')
self.sock.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TCP:
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def connect(self, host, port):
server_address = host, port
print('connecting to {} port {}'.format(*server_address))
self.sock.connect(server_address)
def send(self, value, convergence=False):
"""Send one value (distortion gain) to the server"""
data = json.dumps(dict({'gain': value, 'convergence': convergence})
).encode()
print('Sending value {} as data {}'.format(value, data))
self.sock.sendall(data)
def send2(self, radius, gain, convergence=False):
"""Send two values (distortion gain, and radius) to the server"""
data = json.dumps(dict({'gain': gain, 'radius': radius,
'convergence': convergence})).encode()
print('Sending value ({}, {}) as data {}'.format(radius, gain, data))
self.sock.sendall(data)
def receive(self):
data = self.sock.recv(1024)
print('Received: {}'.format(data))
value = json.loads(data)
return value
def close(self):
print('Closing socket')
self.sock.close()
<|reserved_special_token_1|>
import socket
import json
import numpy as np
<|reserved_special_token_0|>
class TCP:
def __init__(self, sock=None):
if sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
else:
self.sock = sock
def connect(self, host, port):
server_address = host, port
print('connecting to {} port {}'.format(*server_address))
self.sock.connect(server_address)
def send(self, value, convergence=False):
"""Send one value (distortion gain) to the server"""
data = json.dumps(dict({'gain': value, 'convergence': convergence})
).encode()
print('Sending value {} as data {}'.format(value, data))
self.sock.sendall(data)
def send2(self, radius, gain, convergence=False):
"""Send two values (distortion gain, and radius) to the server"""
data = json.dumps(dict({'gain': gain, 'radius': radius,
'convergence': convergence})).encode()
print('Sending value ({}, {}) as data {}'.format(radius, gain, data))
self.sock.sendall(data)
def receive(self):
data = self.sock.recv(1024)
print('Received: {}'.format(data))
value = json.loads(data)
return value
def close(self):
print('Closing socket')
self.sock.close()
<|reserved_special_token_1|>
import socket
import json
import numpy as np
"""TCP client used to communicate with the Unity Application"""
class TCP:
    """TCP client used to communicate with the Unity application.

    Values travel as JSON-encoded dicts over a plain TCP socket.
    """

    def __init__(self, sock=None):
        # Create a fresh TCP socket unless an existing one is injected
        # (handy for testing).
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock

    def connect(self, host, port):
        """Open the connection to (host, port)."""
        server_address = (host, port)
        print('connecting to {} port {}'.format(*server_address))
        self.sock.connect(server_address)

    def send(self, value, convergence=False):
        """Send one value (distortion gain) to the server"""
        # Serialize to JSON bytes before sending.
        data = json.dumps({"gain": value, "convergence": convergence}).encode()
        print("Sending value {} as data {}".format(value, data))
        self.sock.sendall(data)

    def send2(self, radius, gain, convergence=False):
        """Send two values (distortion gain, and radius) to the server"""
        data = json.dumps({"gain": gain, "radius": radius, "convergence": convergence}).encode()
        print("Sending value ({}, {}) as data {}".format(radius, gain, data))
        self.sock.sendall(data)

    def receive(self):
        """Read one JSON message from the socket and return the decoded value."""
        # NOTE(review): assumes a whole JSON document arrives in a single
        # recv() of <= 1024 bytes — fine for tiny payloads, but TCP does
        # not guarantee message boundaries; confirm against the server.
        data = self.sock.recv(1024)
        print("Received: {}".format(data))
        value = json.loads(data)
        return value

    def close(self):
        """Close the underlying socket."""
        print("Closing socket")
        self.sock.close()
|
flexible
|
{
"blob_id": "cc66dcd34115e72479953ca24f4b2eaeb52cf313",
"index": 7747,
"step-1": "<mask token>\n\n\nclass TCP:\n <mask token>\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n <mask token>\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-2": "<mask token>\n\n\nclass TCP:\n <mask token>\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-3": "<mask token>\n\n\nclass TCP:\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-4": "import socket\nimport json\nimport numpy as np\n<mask token>\n\n\nclass TCP:\n\n def __init__(self, sock=None):\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = host, port\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n data = json.dumps(dict({'gain': value, 'convergence': convergence})\n ).encode()\n print('Sending value {} as data {}'.format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n data = json.dumps(dict({'gain': gain, 'radius': radius,\n 'convergence': convergence})).encode()\n print('Sending value ({}, {}) as data {}'.format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n data = self.sock.recv(1024)\n print('Received: {}'.format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print('Closing socket')\n self.sock.close()\n",
"step-5": "import socket\nimport json\nimport numpy as np\n\n\"\"\"TCP client used to communicate with the Unity Application\"\"\"\n\nclass TCP:\n def __init__(self, sock = None):\n # Create a TCP socket\n if sock is None:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n self.sock = sock\n\n def connect(self, host, port):\n server_address = (host, port)\n print('connecting to {} port {}'.format(*server_address))\n self.sock.connect(server_address)\n\n def send(self, value, convergence=False):\n \"\"\"Send one value (distortion gain) to the server\"\"\"\n # dump to json format\n data = json.dumps(dict({\"gain\" : value, \"convergence\" : convergence})).encode()\n print(\"Sending value {} as data {}\".format(value, data))\n self.sock.sendall(data)\n\n def send2(self, radius, gain, convergence=False):\n \"\"\"Send two values (distortion gain, and radius) to the server\"\"\"\n # dump to json format\n data = json.dumps(dict({\"gain\" : gain, \"radius\": radius, \"convergence\" : convergence})).encode()\n print(\"Sending value ({}, {}) as data {}\".format(radius, gain, data))\n self.sock.sendall(data)\n\n def receive(self):\n # Convert bytes to float\n data = self.sock.recv(1024)\n print(\"Received: {}\".format(data))\n value = json.loads(data)\n return value\n\n def close(self):\n print(\"Closing socket\")\n self.sock.close()",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def checkio(data):
return True or False
<|reserved_special_token_1|>
'''
Given an expression with numbers, brackets and operators. In this task only the brackets matter. Brackets come in three types -- "{}", "()" and "[]" -- and delimit the scope of a sub-expression. Every bracket that is opened must later be closed by a bracket of the same type, and bracket scopes must not intersect. Decide whether an expression is correct in this sense; ignore the operators and operands.
Input: An expression containing brackets of different types.
Output: A boolean: whether the expression is correctly bracketed.
Example:
checkio("((5+3)*2+1)") == True
checkio("{[(3+1)+2]+}") == True
checkio("(3+{1-1)}") == False
checkio("[1+1]+(2*2)-{3/3}") == True
checkio("(({[(((1)-2)+3)-3]/3}-3)") == False
'''
def checkio(data):
    """Return True if every bracket in *data* is correctly matched.

    Only the bracket characters "()", "[]" and "{}" are considered; all
    other characters (operands and operators) are ignored. The original
    stub returned `True or False`, i.e. always True — this implements the
    documented check.
    """
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in data:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            # A closer must match the most recently opened bracket.
            if not stack or stack.pop() != pairs[ch]:
                return False
    # Every opener must have been closed.
    return not stack
|
flexible
|
{
"blob_id": "f69b4d022ebed5a0b660f55704bbe762d5d765d5",
"index": 1332,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkio(data):\n return True or False\n",
"step-3": "'''\nGiven an expression with numbers, brackets and operators. But in this task only brackets are important. Brackets can be one of three types -- \"{}\" \"()\" \"[]\". Brackets are determine the scope or restricted some expression. So each if was opened, then must be closed with the same type. The scopes of brackets must not intersected. You should to make a decision correct an expression or not. Don't care about operators and operands.\nInput: An expression with different of types brackets.\nOutput: A boolean. Correct an expression or not.\nExample:\n?\n1\n2\n3\n4\n5\ncheckio(\"((5+3)*2+1)\") == True\ncheckio(\"{[(3+1)+2]+}\") == True\ncheckio(\"(3+{1-1)}\") == False\ncheckio(\"[1+1]+(2*2)-{3/3}\") == True\ncheckio(\"(({[(((1)-2)+3)-3]/3}-3)\") == False\n\n'''\ndef checkio(data):\n #replace this for solution\n return True or False",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
from lxml import etree
if __name__ == '__main__':
    # Parse the local HTML file into an element tree.
    tree = etree.parse('./data.html')
    # XPath: '//' matches at any depth, '[@id="p"]' filters on the id
    # attribute, so this selects every <li> anywhere under the <div>
    # whose id is "p".
    result = tree.xpath('//div[@id="p"]//li')
    for r in result:
        print('--------', etree.tostring(r, encoding='utf-8').decode('utf-8'))
|
normal
|
{
"blob_id": "52c356b903b1fbb8cbf24c899ed86d7bf134a821",
"index": 6387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n tree = etree.parse('./data.html')\n result = tree.xpath('//div[@id=\"p\"]//li')\n for r in result:\n print('--------', etree.tostring(r, encoding='utf-8').decode('utf-8'))\n",
"step-3": "from lxml import etree\nif __name__ == '__main__':\n tree = etree.parse('./data.html')\n result = tree.xpath('//div[@id=\"p\"]//li')\n for r in result:\n print('--------', etree.tostring(r, encoding='utf-8').decode('utf-8'))\n",
"step-4": "# -*- coding: utf-8 -*-\n\nfrom lxml import etree\n\nif __name__ == '__main__':\n # xpath可以解析网页中的内容,html或者xml类型的文件都是<>开头结尾,层次非常明显\n # data = '''<div>\n # <ul>\n # <li class=\"item-0\"><a href=\"http://www.baidu.com\">百度</a></li>\n # <li class=\"item-1\"><a href=\"http://www.baidu.com\">百度</a></li>\n # </ul>\n # </div>\n # '''\n # tree = etree.HTML(data)\n # print(type(data))\n # print(tree, type(tree), etree.tostring(tree).decode('utf-8'), sep='\\n') # sep,一行打印一个数据\n # result = tree.xpath('//li') # //,查找所有li\n # for i in result:\n # print('--------', etree.tostring(i).decode('utf-8'))\n\n # result = tree.xpath('/html/body/div/ul/li') # / ,查找当前路径\n # for r in result:\n # print('===', etree.tostring(r).decode('utf-8'))\n\n # class是标签的属性,xpath中用@表示属性\n # result = tree.xpath('//li[@class=\"item-0\"]') # 查询所有,//;查li,li;表示条件[]\n # for r in result:\n # print('--------', etree.tostring(r).decode('utf-8'))\n\n # result = tree.xpath('//a')\n # for r in result:\n # print('--------', etree.tostring(r).decode('utf-8'))\n\n # 获取a中文本内容\n # result = tree.xpath('//a/text()')\n # print(result)\n\n # result = tree.xpath('//li[contains(@class, \"0\")]') # 提取出数据所有li中class属性包含0的元素\n # for r in result:\n # print('--------', etree.tostring(r).decode('utf-8'))\n\n tree = etree.parse('./data.html')\n # print(etree.tostring(tree, encoding='utf-8').decode('utf-8'))\n # result = tree.xpath('//li[@id=\"hehe\"]/text()')\n # print(result)\n\n result = tree.xpath('//div[@id=\"p\"]//li')\n for r in result:\n print('--------', etree.tostring(r, encoding='utf-8').decode('utf-8'))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#Import Discord Package
import discord
from discord.ext import commands
import asyncio
import glob
from dotenv import load_dotenv
import os
load_dotenv() # Load your Discord Token
TOKEN = os.getenv("TOKEN")
bot = commands.Bot(command_prefix='.',case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("wow.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("THICC.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("barks.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("Welcome.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("grapefruit.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("hello.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("winning.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("basingstoke.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("milleb.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("jew.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("here.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = ("bwekfast.mp3")
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run (TOKEN)
|
normal
|
{
"blob_id": "41842e8b75860c65e87e9db1f7ae058957e37e45",
"index": 1822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv()\n<mask token>\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await 
ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n 
await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-3": "<mask token>\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nbot = commands.Bot(command_prefix='.', case_insensitive=True)\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await 
vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n 
vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-4": "import discord\nfrom discord.ext import commands\nimport asyncio\nimport glob\nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nbot = commands.Bot(command_prefix='.', case_insensitive=True)\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n 
vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = 
ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-5": "#Import Discord Package\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport glob\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv() # Load your Discord Token\n\nTOKEN = os.getenv(\"TOKEN\") \n\nbot = commands.Bot(command_prefix='.',case_insensitive=True)\n \nprint('Ready!')\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n@bot.command()\nasync def wew(ctx):\n\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n \n counter = 0\n\n song = (\"wow.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def thicc(ctx):\n\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"THICC.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n \n\n@bot.command()\nasync def woof(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"barks.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"Welcome.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n 
await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"grapefruit.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def hello(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"hello.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def winning(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"winning.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def basingstoke(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"basingstoke.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def milleb(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"milleb.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def jew(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"jew.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter 
= counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"here.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def BWEKFAST(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"bwekfast.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n\n\nbot.run (TOKEN)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
for i in range(10):
print('here %s' % i)
time.sleep(1)
print('TEST SUCEEDED')
<|reserved_special_token_1|>
import time
if __name__ == '__main__':
for i in range(10):
print('here %s' % i)
time.sleep(1)
print('TEST SUCEEDED')
|
flexible
|
{
"blob_id": "a159f9f9cc06bb9d22f84781fb2fc664ea204b64",
"index": 6856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n for i in range(10):\n print('here %s' % i)\n time.sleep(1)\n print('TEST SUCEEDED')\n",
"step-3": "import time\nif __name__ == '__main__':\n for i in range(10):\n print('here %s' % i)\n time.sleep(1)\n print('TEST SUCEEDED')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print()
<|reserved_special_token_0|>
driver.get('https://strawpoll.com/jhzd6qwjw')
for i in range(0, n + 1):
driver.delete_all_cookies()
try:
button = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//input[@value='9c1zz2ugv55r']")))
driver.execute_script('arguments[0].click();', button)
buttons = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script('arguments[0].click();', buttons)
except:
print()
try:
c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.XPATH, "//h1[@class='title']")))
driver.back()
print('Vote Successful')
print()
except:
print()
if i == n - 1:
driver.quit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = int(input('Enter the number of votes : '))
print()
path = 'C:\\Program Files\\chromedriver.exe'
driver = webdriver.Chrome(path)
driver.get('https://strawpoll.com/jhzd6qwjw')
for i in range(0, n + 1):
driver.delete_all_cookies()
try:
button = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//input[@value='9c1zz2ugv55r']")))
driver.execute_script('arguments[0].click();', button)
buttons = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script('arguments[0].click();', buttons)
except:
print()
try:
c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.XPATH, "//h1[@class='title']")))
driver.back()
print('Vote Successful')
print()
except:
print()
if i == n - 1:
driver.quit()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
n = int(input('Enter the number of votes : '))
print()
path = 'C:\\Program Files\\chromedriver.exe'
driver = webdriver.Chrome(path)
driver.get('https://strawpoll.com/jhzd6qwjw')
for i in range(0, n + 1):
driver.delete_all_cookies()
try:
button = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//input[@value='9c1zz2ugv55r']")))
driver.execute_script('arguments[0].click();', button)
buttons = WebDriverWait(driver, 10).until(EC.
presence_of_element_located((By.XPATH,
"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script('arguments[0].click();', buttons)
except:
print()
try:
c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.XPATH, "//h1[@class='title']")))
driver.back()
print('Vote Successful')
print()
except:
print()
if i == n - 1:
driver.quit()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
n=int(input("Enter the number of votes : "))
print()
path="C:\\Program Files\\chromedriver.exe"
driver=webdriver.Chrome(path)
driver.get("https://strawpoll.com/jhzd6qwjw")
for i in range(0,n+1):
driver.delete_all_cookies()
try:
button=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//input[@value='9c1zz2ugv55r']")))
driver.execute_script("arguments[0].click();", button)
buttons=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//button[@class='button is-primary is-fullwidth']")))
driver.execute_script("arguments[0].click();", buttons)
except:
print()
try:
c=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,"//h1[@class='title']")))
driver.back()
print("Vote Successful")
print()
except:
print()
if i==n-1:
driver.quit()
|
flexible
|
{
"blob_id": "0e2b4e8e8c5a728e5123dfa704007b0f6adaf1e1",
"index": 4561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint()\n<mask token>\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-3": "<mask token>\nn = int(input('Enter the number of votes : '))\nprint()\npath = 'C:\\\\Program Files\\\\chromedriver.exe'\ndriver = webdriver.Chrome(path)\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nn = int(input('Enter the number of votes : '))\nprint()\npath = 'C:\\\\Program Files\\\\chromedriver.exe'\ndriver = webdriver.Chrome(path)\ndriver.get('https://strawpoll.com/jhzd6qwjw')\nfor i in range(0, n + 1):\n driver.delete_all_cookies()\n try:\n button = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//input[@value='9c1zz2ugv55r']\")))\n driver.execute_script('arguments[0].click();', button)\n buttons = WebDriverWait(driver, 10).until(EC.\n presence_of_element_located((By.XPATH,\n \"//button[@class='button is-primary is-fullwidth']\")))\n driver.execute_script('arguments[0].click();', buttons)\n except:\n print()\n try:\n c = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, \"//h1[@class='title']\")))\n driver.back()\n print('Vote Successful')\n print()\n except:\n print()\n if i == n - 1:\n driver.quit()\n",
"step-5": "from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By \r\nfrom selenium.webdriver.support.ui import WebDriverWait \r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nn=int(input(\"Enter the number of votes : \"))\r\nprint()\r\npath=\"C:\\\\Program Files\\\\chromedriver.exe\"\r\ndriver=webdriver.Chrome(path)\r\ndriver.get(\"https://strawpoll.com/jhzd6qwjw\")\r\nfor i in range(0,n+1):\r\n driver.delete_all_cookies()\r\n try:\r\n button=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//input[@value='9c1zz2ugv55r']\")))\r\n driver.execute_script(\"arguments[0].click();\", button)\r\n buttons=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//button[@class='button is-primary is-fullwidth']\")))\r\n driver.execute_script(\"arguments[0].click();\", buttons)\r\n except:\r\n print()\r\n try:\r\n c=WebDriverWait(driver,10).until(EC.presence_of_element_located((By.XPATH,\"//h1[@class='title']\")))\r\n driver.back()\r\n print(\"Vote Successful\")\r\n print()\r\n except:\r\n print()\r\n if i==n-1:\r\n driver.quit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Session:
<|reserved_special_token_0|>
class StateEnum:
"""
A class for defining session state.
"""
ACTIVE = 'Active'
INACTIVE = 'Inactive'
CLOSED = 'Closed'
KILLED = 'Killed'
EXPIRED = 'Expired'
DISABLED = 'Disabled'
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime
def get_client_ip(self):
"""
Returns the user IP address.
"""
return self._client_ip
def close(self):
"""
Closes the session.
"""
session_services.close_session(self)
def active(self, client_request):
"""
Activates the session. Sets this session to current thread.
"""
self._set_client_request(client_request)
thread = current_thread()
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
"""
Sets call context to session.
"""
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
"""
Returns call context.
@return {}
"""
return self._client_request.context
def get_internal_context(self):
"""
Retunrs internal system context for the current call
@rtype: dict
@return: internal context dictionary
"""
if not hasattr(self._client_request, 'internal_context'
) or self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
"""
Returns current client request.
@rtype: ClientRequest
@return: client request
"""
return self._client_request
<|reserved_special_token_0|>
def get_id(self):
"""
Returns session ID.
@return: int
"""
return self._id
def get_user(self):
"""
Returns the user which creates this session.
@return: user
"""
return security_services.get_user(self._user_id)
def get_user_id(self):
"""
Returns the user which creates this session.
@return: user
"""
return self._user_id
def update(self):
"""
Updates session.
"""
session_services.update_session(self)
def cleanup(self):
"""
Cleanups the session.
"""
session_services.cleanup_session(self)
def get_state(self):
"""
Returns the session state.
@return: str
"""
return self._state
<|reserved_special_token_0|>
def get_creation_date(self):
"""
Returns the session creation date.
@return:
"""
return time.ctime(self._create_date)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
if (time.time() - self._create_date
) * 1000 > self._lifetime + 300000:
return True
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Session:
<|reserved_special_token_0|>
class StateEnum:
"""
A class for defining session state.
"""
ACTIVE = 'Active'
INACTIVE = 'Inactive'
CLOSED = 'Closed'
KILLED = 'Killed'
EXPIRED = 'Expired'
DISABLED = 'Disabled'
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime
def get_client_ip(self):
"""
Returns the user IP address.
"""
return self._client_ip
def close(self):
"""
Closes the session.
"""
session_services.close_session(self)
def active(self, client_request):
"""
Activates the session. Sets this session to current thread.
"""
self._set_client_request(client_request)
thread = current_thread()
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
"""
Sets call context to session.
"""
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
"""
Returns call context.
@return {}
"""
return self._client_request.context
def get_internal_context(self):
"""
Retunrs internal system context for the current call
@rtype: dict
@return: internal context dictionary
"""
if not hasattr(self._client_request, 'internal_context'
) or self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
"""
Returns current client request.
@rtype: ClientRequest
@return: client request
"""
return self._client_request
def get_ticket(self):
"""
Returns session ID.
@return: str
"""
return self._ticket
def get_id(self):
"""
Returns session ID.
@return: int
"""
return self._id
def get_user(self):
"""
Returns the user which creates this session.
@return: user
"""
return security_services.get_user(self._user_id)
def get_user_id(self):
"""
Returns the user which creates this session.
@return: user
"""
return self._user_id
def update(self):
"""
Updates session.
"""
session_services.update_session(self)
def cleanup(self):
"""
Cleanups the session.
"""
session_services.cleanup_session(self)
def get_state(self):
"""
Returns the session state.
@return: str
"""
return self._state
def set_state(self, state):
"""
Returns the session state.
@return: str
"""
self._state = state
self.update()
def get_creation_date(self):
"""
Returns the session creation date.
@return:
"""
return time.ctime(self._create_date)
def get_context(self):
"""
Returns session context.
@return: SessionContext
"""
return self._context
<|reserved_special_token_0|>
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
if (time.time() - self._create_date
) * 1000 > self._lifetime + 300000:
return True
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SessionContext(dict):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __setitem__(self, key, value):
"""
Sets new item or updates existing item in context
@param key:
@param value:
"""
result = super(SessionContext, self).__setitem__(key, value)
session_services.get_session(self._ticket, False).update()
return result
class Session:
"""
A class for storing session information.
"""
class StateEnum:
"""
A class for defining session state.
"""
ACTIVE = 'Active'
INACTIVE = 'Inactive'
CLOSED = 'Closed'
KILLED = 'Killed'
EXPIRED = 'Expired'
DISABLED = 'Disabled'
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime
def get_client_ip(self):
"""
Returns the user IP address.
"""
return self._client_ip
def close(self):
"""
Closes the session.
"""
session_services.close_session(self)
def active(self, client_request):
"""
Activates the session. Sets this session to current thread.
"""
self._set_client_request(client_request)
thread = current_thread()
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
"""
Sets call context to session.
"""
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
"""
Returns call context.
@return {}
"""
return self._client_request.context
def get_internal_context(self):
"""
Retunrs internal system context for the current call
@rtype: dict
@return: internal context dictionary
"""
if not hasattr(self._client_request, 'internal_context'
) or self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
"""
Returns current client request.
@rtype: ClientRequest
@return: client request
"""
return self._client_request
def get_ticket(self):
"""
Returns session ID.
@return: str
"""
return self._ticket
def get_id(self):
"""
Returns session ID.
@return: int
"""
return self._id
def get_user(self):
"""
Returns the user which creates this session.
@return: user
"""
return security_services.get_user(self._user_id)
def get_user_id(self):
"""
Returns the user which creates this session.
@return: user
"""
return self._user_id
def update(self):
"""
Updates session.
"""
session_services.update_session(self)
def cleanup(self):
"""
Cleanups the session.
"""
session_services.cleanup_session(self)
def get_state(self):
"""
Returns the session state.
@return: str
"""
return self._state
def set_state(self, state):
"""
Returns the session state.
@return: str
"""
self._state = state
self.update()
def get_creation_date(self):
"""
Returns the session creation date.
@return:
"""
return time.ctime(self._create_date)
def get_context(self):
"""
Returns session context.
@return: SessionContext
"""
return self._context
def __str__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
if (time.time() - self._create_date
) * 1000 > self._lifetime + 300000:
return True
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SessionException(DeltaException):
<|reserved_special_token_0|>
pass
class SessionContext(dict):
"""
A class for saving some data in session domain.
"""
def __init__(self, session):
"""
@param session:
"""
super(SessionContext, self).__init__()
self._ticket = session.get_ticket()
def __setitem__(self, key, value):
"""
Sets new item or updates existing item in context
@param key:
@param value:
"""
result = super(SessionContext, self).__setitem__(key, value)
session_services.get_session(self._ticket, False).update()
return result
class Session:
"""
A class for storing session information.
"""
class StateEnum:
"""
A class for defining session state.
"""
ACTIVE = 'Active'
INACTIVE = 'Inactive'
CLOSED = 'Closed'
KILLED = 'Killed'
EXPIRED = 'Expired'
DISABLED = 'Disabled'
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime
def get_client_ip(self):
"""
Returns the user IP address.
"""
return self._client_ip
def close(self):
"""
Closes the session.
"""
session_services.close_session(self)
def active(self, client_request):
"""
Activates the session. Sets this session to current thread.
"""
self._set_client_request(client_request)
thread = current_thread()
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
"""
Sets call context to session.
"""
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
"""
Returns call context.
@return {}
"""
return self._client_request.context
def get_internal_context(self):
"""
Retunrs internal system context for the current call
@rtype: dict
@return: internal context dictionary
"""
if not hasattr(self._client_request, 'internal_context'
) or self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
"""
Returns current client request.
@rtype: ClientRequest
@return: client request
"""
return self._client_request
def get_ticket(self):
"""
Returns session ID.
@return: str
"""
return self._ticket
def get_id(self):
"""
Returns session ID.
@return: int
"""
return self._id
def get_user(self):
"""
Returns the user which creates this session.
@return: user
"""
return security_services.get_user(self._user_id)
def get_user_id(self):
"""
Returns the user which creates this session.
@return: user
"""
return self._user_id
def update(self):
"""
Updates session.
"""
session_services.update_session(self)
def cleanup(self):
"""
Cleanups the session.
"""
session_services.cleanup_session(self)
def get_state(self):
"""
Returns the session state.
@return: str
"""
return self._state
def set_state(self, state):
"""
Returns the session state.
@return: str
"""
self._state = state
self.update()
def get_creation_date(self):
"""
Returns the session creation date.
@return:
"""
return time.ctime(self._create_date)
def get_context(self):
"""
Returns session context.
@return: SessionContext
"""
return self._context
def __str__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
if (time.time() - self._create_date
) * 1000 > self._lifetime + 300000:
return True
return False
<|reserved_special_token_1|>
'''
Created on May 18, 2010
@author: Abi.Mohammadi & Majid.Vesal
'''
from threading import current_thread
import copy
import time
from deltapy.core import DeltaException, Context
import deltapy.security.services as security_services
import deltapy.security.session.services as session_services
import deltapy.unique_id.services as unique_id_services
class SessionException(DeltaException):
'''
A class for handling session exceptions.
'''
pass
#class SessionContext(Context):
# '''
# A class for saving some data in session domain.
# '''
#
# def __init__(self, session):
# '''
# @param session:
# '''
#
# Context.__init__(self)
# self['__session__'] = session
#
# def __setitem__(self, key, value):
# '''
# Sets new item or updates existing item in context
#
# @param key:
# @param value:
# '''
#
# result = Context.__setitem__(self, key, value)
# self['__session__'].update()
# return result
class SessionContext(dict):
'''
A class for saving some data in session domain.
'''
def __init__(self, session):
'''
@param session:
'''
super(SessionContext, self).__init__()
self._ticket = session.get_ticket()
def __setitem__(self, key, value):
'''
Sets new item or updates existing item in context
@param key:
@param value:
'''
result = super(SessionContext, self).__setitem__(key, value)
# Updating session because of this change in session context
session_services.get_session(self._ticket, False).update()
return result
class Session:
"""
A class for storing session information.
"""
class StateEnum:
'''
A class for defining session state.
'''
ACTIVE = "Active"
INACTIVE = "Inactive"
CLOSED = "Closed"
KILLED = "Killed"
EXPIRED = "Expired"
DISABLED = "Disabled"
def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):
self._ticket = ticket
self._state = Session.StateEnum.INACTIVE
self._create_date = time.time()
self._id = unique_id_services.get_id('session_id')
self._context = SessionContext(self)
self._user_id = user.id
self._client_ip = client_ip
self._client_request = None
self._lifetime = lifetime # millisecond
def get_client_ip(self):
'''
Returns the user IP address.
'''
return self._client_ip
def close(self):
'''
Closes the session.
'''
session_services.close_session(self)
def active(self, client_request):
'''
Activates the session. Sets this session to current thread.
'''
self._set_client_request(client_request)
thread = current_thread()
thread.__LOCALE__ = client_request.context.get('__LOCALE__')
session_services.active_session(self)
def _set_client_request(self, client_request):
'''
Sets call context to session.
'''
if client_request.context is None:
client_request.context = {}
self._client_request = copy.deepcopy(client_request)
def get_call_context(self):
'''
Returns call context.
@return {}
'''
return self._client_request.context
def get_internal_context(self):
'''
Retunrs internal system context for the current call
@rtype: dict
@return: internal context dictionary
'''
if not hasattr(self._client_request, 'internal_context') or \
self._client_request.internal_context is None:
self._client_request.internal_context = {}
return self._client_request.internal_context
def get_client_request(self):
'''
Returns current client request.
@rtype: ClientRequest
@return: client request
'''
return self._client_request
def get_ticket(self):
'''
Returns session ID.
@return: str
'''
return self._ticket
def get_id(self):
'''
Returns session ID.
@return: int
'''
return self._id
def get_user(self):
'''
Returns the user which creates this session.
@return: user
'''
return security_services.get_user(self._user_id)
def get_user_id(self):
'''
Returns the user which creates this session.
@return: user
'''
return self._user_id
def update(self):
'''
Updates session.
'''
session_services.update_session(self)
def cleanup(self):
'''
Cleanups the session.
'''
session_services.cleanup_session(self)
def get_state(self):
'''
Returns the session state.
@return: str
'''
return self._state
def set_state(self, state):
'''
Returns the session state.
@return: str
'''
self._state = state
self.update()
def get_creation_date(self):
'''
Returns the session creation date.
@return:
'''
return time.ctime(self._create_date)
def get_context(self):
'''
Returns session context.
@return: SessionContext
'''
return self._context
def __str__(self):
return "%s[%s]" % (self.__class__.__name__, self.get_ticket())
def __repr__(self):
return "%s[%s]" % (self.__class__.__name__, self.get_ticket())
def is_expired(self):
"""
If session is expired, returns True.
@return: Is expired
@rtype: bool
"""
if self._lifetime is not None and self._lifetime > 0:
# 300 seconds waite is the tolerance !
# The unit of lifetime is millisecond
if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:
return True
return False
|
flexible
|
{
"blob_id": "80469fd945a21c1bd2b5590047016a4b60880c88",
"index": 7006,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client 
request\n \"\"\"\n return self._client_request\n <mask token>\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n <mask token>\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-2": "<mask token>\n\n\nclass Session:\n <mask token>\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client 
request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n \"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n <mask token>\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-3": "<mask token>\n\n\nclass SessionContext(dict):\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n \"\"\"\n result = super(SessionContext, self).__setitem__(key, value)\n session_services.get_session(self._ticket, False).update()\n return result\n\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. 
Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the session state.\n \n 
@return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n \"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n\n def __str__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-4": "<mask token>\n\n\nclass SessionException(DeltaException):\n <mask token>\n pass\n\n\nclass SessionContext(dict):\n \"\"\"\n A class for saving some data in session domain.\n \"\"\"\n\n def __init__(self, session):\n \"\"\"\n @param session:\n \"\"\"\n super(SessionContext, self).__init__()\n self._ticket = session.get_ticket()\n\n def __setitem__(self, key, value):\n \"\"\"\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n \"\"\"\n result = super(SessionContext, self).__setitem__(key, value)\n session_services.get_session(self._ticket, False).update()\n return result\n\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n\n class StateEnum:\n \"\"\"\n A class for defining session state.\n \"\"\"\n ACTIVE = 'Active'\n INACTIVE = 'Inactive'\n CLOSED = 'Closed'\n KILLED = 'Killed'\n EXPIRED = 'Expired'\n DISABLED = 'Disabled'\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime\n\n def get_client_ip(self):\n \"\"\"\n Returns the user IP address.\n \"\"\"\n return self._client_ip\n\n def close(self):\n \"\"\"\n Closes the session.\n \"\"\"\n session_services.close_session(self)\n\n def active(self, client_request):\n \"\"\"\n Activates the session. 
Sets this session to current thread.\n \"\"\"\n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n\n def _set_client_request(self, client_request):\n \"\"\"\n Sets call context to session.\n \"\"\"\n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n\n def get_call_context(self):\n \"\"\"\n Returns call context.\n \n @return {}\n \"\"\"\n return self._client_request.context\n\n def get_internal_context(self):\n \"\"\"\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n \"\"\"\n if not hasattr(self._client_request, 'internal_context'\n ) or self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n return self._client_request.internal_context\n\n def get_client_request(self):\n \"\"\"\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n \"\"\"\n return self._client_request\n\n def get_ticket(self):\n \"\"\"\n Returns session ID.\n \n @return: str\n \"\"\"\n return self._ticket\n\n def get_id(self):\n \"\"\"\n Returns session ID.\n \n @return: int\n \"\"\"\n return self._id\n\n def get_user(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return security_services.get_user(self._user_id)\n\n def get_user_id(self):\n \"\"\"\n Returns the user which creates this session.\n \n @return: user\n \"\"\"\n return self._user_id\n\n def update(self):\n \"\"\"\n Updates session.\n \"\"\"\n session_services.update_session(self)\n\n def cleanup(self):\n \"\"\"\n Cleanups the session.\n \"\"\"\n session_services.cleanup_session(self)\n\n def get_state(self):\n \"\"\"\n Returns the session state.\n \n @return: str\n \"\"\"\n return self._state\n\n def set_state(self, state):\n \"\"\"\n Returns the session state.\n \n 
@return: str\n \"\"\"\n self._state = state\n self.update()\n\n def get_creation_date(self):\n \"\"\"\n Returns the session creation date.\n \n @return: \n \"\"\"\n return time.ctime(self._create_date)\n\n def get_context(self):\n \"\"\"\n Returns session context.\n \n @return: SessionContext\n \"\"\"\n return self._context\n\n def __str__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def __repr__(self):\n return '%s[%s]' % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n if self._lifetime is not None and self._lifetime > 0:\n if (time.time() - self._create_date\n ) * 1000 > self._lifetime + 300000:\n return True\n return False\n",
"step-5": "'''\nCreated on May 18, 2010\n\n@author: Abi.Mohammadi & Majid.Vesal\n'''\n\nfrom threading import current_thread\n\nimport copy\nimport time\n\nfrom deltapy.core import DeltaException, Context\n\nimport deltapy.security.services as security_services\nimport deltapy.security.session.services as session_services\nimport deltapy.unique_id.services as unique_id_services\n\nclass SessionException(DeltaException):\n '''\n A class for handling session exceptions.\n '''\n pass\n\n#class SessionContext(Context):\n# '''\n# A class for saving some data in session domain.\n# '''\n# \n# def __init__(self, session):\n# '''\n# @param session:\n# '''\n# \n# Context.__init__(self)\n# self['__session__'] = session\n# \n# def __setitem__(self, key, value):\n# '''\n# Sets new item or updates existing item in context\n# \n# @param key:\n# @param value:\n# '''\n# \n# result = Context.__setitem__(self, key, value)\n# self['__session__'].update()\n# return result\n\nclass SessionContext(dict):\n '''\n A class for saving some data in session domain.\n '''\n \n def __init__(self, session):\n '''\n @param session:\n '''\n \n super(SessionContext, self).__init__()\n self._ticket = session.get_ticket()\n \n def __setitem__(self, key, value):\n '''\n Sets new item or updates existing item in context\n \n @param key:\n @param value:\n '''\n result = super(SessionContext, self).__setitem__(key, value)\n \n # Updating session because of this change in session context\n session_services.get_session(self._ticket, False).update()\n \n return result\n\nclass Session:\n \"\"\"\n A class for storing session information.\n \"\"\"\n\n class StateEnum:\n '''\n A class for defining session state.\n '''\n ACTIVE = \"Active\"\n INACTIVE = \"Inactive\"\n CLOSED = \"Closed\"\n KILLED = \"Killed\"\n EXPIRED = \"Expired\"\n DISABLED = \"Disabled\"\n\n def __init__(self, ticket=None, user=None, client_ip=None, lifetime=None):\n self._ticket = ticket\n self._state = Session.StateEnum.INACTIVE\n 
self._create_date = time.time()\n self._id = unique_id_services.get_id('session_id')\n self._context = SessionContext(self)\n self._user_id = user.id\n self._client_ip = client_ip\n self._client_request = None\n self._lifetime = lifetime # millisecond\n \n def get_client_ip(self):\n '''\n Returns the user IP address.\n '''\n \n return self._client_ip\n \n def close(self):\n '''\n Closes the session.\n '''\n session_services.close_session(self)\n \n def active(self, client_request):\n '''\n Activates the session. Sets this session to current thread.\n '''\n \n self._set_client_request(client_request)\n thread = current_thread()\n thread.__LOCALE__ = client_request.context.get('__LOCALE__')\n session_services.active_session(self)\n \n def _set_client_request(self, client_request):\n '''\n Sets call context to session.\n '''\n \n if client_request.context is None:\n client_request.context = {}\n self._client_request = copy.deepcopy(client_request)\n \n def get_call_context(self):\n '''\n Returns call context.\n \n @return {}\n '''\n \n return self._client_request.context\n\n def get_internal_context(self):\n '''\n Retunrs internal system context for the current call\n\n @rtype: dict\n @return: internal context dictionary\n '''\n\n if not hasattr(self._client_request, 'internal_context') or \\\n self._client_request.internal_context is None:\n self._client_request.internal_context = {}\n\n return self._client_request.internal_context\n \n def get_client_request(self):\n '''\n Returns current client request.\n \n @rtype: ClientRequest\n @return: client request\n '''\n \n return self._client_request\n\n def get_ticket(self):\n '''\n Returns session ID.\n \n @return: str\n '''\n \n return self._ticket\n \n def get_id(self):\n '''\n Returns session ID.\n \n @return: int\n '''\n \n return self._id\n \n\n def get_user(self):\n '''\n Returns the user which creates this session.\n \n @return: user\n '''\n \n return security_services.get_user(self._user_id)\n \n def 
get_user_id(self):\n '''\n Returns the user which creates this session.\n \n @return: user\n '''\n \n return self._user_id\n\n def update(self):\n '''\n Updates session.\n '''\n \n session_services.update_session(self)\n \n def cleanup(self):\n '''\n Cleanups the session.\n '''\n \n session_services.cleanup_session(self)\n \n def get_state(self):\n '''\n Returns the session state.\n \n @return: str\n '''\n \n return self._state\n \n def set_state(self, state):\n '''\n Returns the session state.\n \n @return: str\n '''\n \n self._state = state\n self.update()\n\n def get_creation_date(self):\n '''\n Returns the session creation date.\n \n @return: \n '''\n \n return time.ctime(self._create_date)\n \n def get_context(self):\n '''\n Returns session context.\n \n @return: SessionContext\n '''\n \n return self._context \n \n def __str__(self):\n return \"%s[%s]\" % (self.__class__.__name__, self.get_ticket())\n \n def __repr__(self):\n return \"%s[%s]\" % (self.__class__.__name__, self.get_ticket())\n\n def is_expired(self):\n \"\"\"\n If session is expired, returns True.\n\n @return: Is expired\n @rtype: bool\n \"\"\"\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False\n",
"step-ids": [
18,
21,
25,
28,
31
]
}
|
[
18,
21,
25,
28,
31
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
st.set_option('deprecation.showfileUploaderEncoding', False)
np.set_printoptions(suppress=True)
<|reserved_special_token_0|>
st.title('Leaf Disease Detection Using Machine Learning')
<|reserved_special_token_0|>
if uploaded_file is not None:
image = Image.open(uploaded_file)
size = 224, 224
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image_array = np.asarray(image)
st.image(image, caption='Uploaded Image.', width=300)
normalized_image_array = image_array.astype(np.float32) / 127.0 - 1
data[0] = normalized_image_array
prediction = model.predict(data)
data = np.rint(prediction)
print(data)
if data[0][0] == 1:
st.write('Grape___Black_rot')
if data[0][1] == 1:
st.write('Grape___Esca_(Black_Measles)')
if data[0][2] == 1:
st.write('Grape___healthy')
if data[0][3] == 1:
st.write('Grape___Leaf_blight')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
st.set_option('deprecation.showfileUploaderEncoding', False)
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model('keras_model.h5')
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
st.title('Leaf Disease Detection Using Machine Learning')
uploaded_file = st.file_uploader('Choose an image...', type='JPG')
if uploaded_file is not None:
image = Image.open(uploaded_file)
size = 224, 224
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image_array = np.asarray(image)
st.image(image, caption='Uploaded Image.', width=300)
normalized_image_array = image_array.astype(np.float32) / 127.0 - 1
data[0] = normalized_image_array
prediction = model.predict(data)
data = np.rint(prediction)
print(data)
if data[0][0] == 1:
st.write('Grape___Black_rot')
if data[0][1] == 1:
st.write('Grape___Esca_(Black_Measles)')
if data[0][2] == 1:
st.write('Grape___healthy')
if data[0][3] == 1:
st.write('Grape___Leaf_blight')
<|reserved_special_token_1|>
import streamlit as st
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
st.set_option('deprecation.showfileUploaderEncoding', False)
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model('keras_model.h5')
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
st.title('Leaf Disease Detection Using Machine Learning')
uploaded_file = st.file_uploader('Choose an image...', type='JPG')
if uploaded_file is not None:
image = Image.open(uploaded_file)
size = 224, 224
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image_array = np.asarray(image)
st.image(image, caption='Uploaded Image.', width=300)
normalized_image_array = image_array.astype(np.float32) / 127.0 - 1
data[0] = normalized_image_array
prediction = model.predict(data)
data = np.rint(prediction)
print(data)
if data[0][0] == 1:
st.write('Grape___Black_rot')
if data[0][1] == 1:
st.write('Grape___Esca_(Black_Measles)')
if data[0][2] == 1:
st.write('Grape___healthy')
if data[0][3] == 1:
st.write('Grape___Leaf_blight')
<|reserved_special_token_1|>
import streamlit as st
import tensorflow.keras
from PIL import Image, ImageOps
import numpy as np
st.set_option('deprecation.showfileUploaderEncoding', False)
np.set_printoptions(suppress=True)
model = tensorflow.keras.models.load_model('keras_model.h5')
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
st.title("Leaf Disease Detection Using Machine Learning")
uploaded_file = st.file_uploader("Choose an image...", type="JPG")
if uploaded_file is not None:
image = Image.open(uploaded_file)
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
image_array = np.asarray(image)
#image.show()
st.image(image, caption='Uploaded Image.', width=300)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
data[0] = normalized_image_array
prediction = model.predict(data)
data = np.rint(prediction)
print(data)
if(data[0][0]==1):
st.write("Grape___Black_rot")
if(data[0][1]==1):
st.write("Grape___Esca_(Black_Measles)")
if(data[0][2]==1):
st.write("Grape___healthy")
if(data[0][3]==1):
st.write("Grape___Leaf_blight")
|
flexible
|
{
"blob_id": "746e0895f0fb971156e778cbff20317cc88441f1",
"index": 2059,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nst.set_option('deprecation.showfileUploaderEncoding', False)\nnp.set_printoptions(suppress=True)\n<mask token>\nst.title('Leaf Disease Detection Using Machine Learning')\n<mask token>\nif uploaded_file is not None:\n image = Image.open(uploaded_file)\n size = 224, 224\n image = ImageOps.fit(image, size, Image.ANTIALIAS)\n image_array = np.asarray(image)\n st.image(image, caption='Uploaded Image.', width=300)\n normalized_image_array = image_array.astype(np.float32) / 127.0 - 1\n data[0] = normalized_image_array\n prediction = model.predict(data)\n data = np.rint(prediction)\n print(data)\n if data[0][0] == 1:\n st.write('Grape___Black_rot')\n if data[0][1] == 1:\n st.write('Grape___Esca_(Black_Measles)')\n if data[0][2] == 1:\n st.write('Grape___healthy')\n if data[0][3] == 1:\n st.write('Grape___Leaf_blight')\n",
"step-3": "<mask token>\nst.set_option('deprecation.showfileUploaderEncoding', False)\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('keras_model.h5')\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\nst.title('Leaf Disease Detection Using Machine Learning')\nuploaded_file = st.file_uploader('Choose an image...', type='JPG')\nif uploaded_file is not None:\n image = Image.open(uploaded_file)\n size = 224, 224\n image = ImageOps.fit(image, size, Image.ANTIALIAS)\n image_array = np.asarray(image)\n st.image(image, caption='Uploaded Image.', width=300)\n normalized_image_array = image_array.astype(np.float32) / 127.0 - 1\n data[0] = normalized_image_array\n prediction = model.predict(data)\n data = np.rint(prediction)\n print(data)\n if data[0][0] == 1:\n st.write('Grape___Black_rot')\n if data[0][1] == 1:\n st.write('Grape___Esca_(Black_Measles)')\n if data[0][2] == 1:\n st.write('Grape___healthy')\n if data[0][3] == 1:\n st.write('Grape___Leaf_blight')\n",
"step-4": "import streamlit as st\nimport tensorflow.keras\nfrom PIL import Image, ImageOps\nimport numpy as np\nst.set_option('deprecation.showfileUploaderEncoding', False)\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('keras_model.h5')\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\nst.title('Leaf Disease Detection Using Machine Learning')\nuploaded_file = st.file_uploader('Choose an image...', type='JPG')\nif uploaded_file is not None:\n image = Image.open(uploaded_file)\n size = 224, 224\n image = ImageOps.fit(image, size, Image.ANTIALIAS)\n image_array = np.asarray(image)\n st.image(image, caption='Uploaded Image.', width=300)\n normalized_image_array = image_array.astype(np.float32) / 127.0 - 1\n data[0] = normalized_image_array\n prediction = model.predict(data)\n data = np.rint(prediction)\n print(data)\n if data[0][0] == 1:\n st.write('Grape___Black_rot')\n if data[0][1] == 1:\n st.write('Grape___Esca_(Black_Measles)')\n if data[0][2] == 1:\n st.write('Grape___healthy')\n if data[0][3] == 1:\n st.write('Grape___Leaf_blight')\n",
"step-5": "import streamlit as st\nimport tensorflow.keras\nfrom PIL import Image, ImageOps\nimport numpy as np\nst.set_option('deprecation.showfileUploaderEncoding', False)\nnp.set_printoptions(suppress=True)\nmodel = tensorflow.keras.models.load_model('keras_model.h5')\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\nst.title(\"Leaf Disease Detection Using Machine Learning\")\n\nuploaded_file = st.file_uploader(\"Choose an image...\", type=\"JPG\")\nif uploaded_file is not None:\n image = Image.open(uploaded_file)\n size = (224, 224)\n image = ImageOps.fit(image, size, Image.ANTIALIAS)\n\n image_array = np.asarray(image)\n #image.show()\n st.image(image, caption='Uploaded Image.', width=300)\n normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\n data[0] = normalized_image_array\n prediction = model.predict(data)\n data = np.rint(prediction)\n print(data)\n if(data[0][0]==1):\n st.write(\"Grape___Black_rot\")\n if(data[0][1]==1):\n st.write(\"Grape___Esca_(Black_Measles)\")\n if(data[0][2]==1):\n st.write(\"Grape___healthy\")\n if(data[0][3]==1):\n st.write(\"Grape___Leaf_blight\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def welcome(request):
return render(request, 'welcome.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def welcome(request):
return render(request, 'welcome.html')
def users(request):
form = NewUserForm()
if request.method == 'POST':
form = NewUserForm(request.POST)
if form.is_valid():
form.save(commit=True)
return welcome(request)
else:
print('ERROR FORM INVALID')
return render(request, 'users.html', {'form': form})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
from appTwo.forms import NewUserForm
def welcome(request):
return render(request, 'welcome.html')
def users(request):
form = NewUserForm()
if request.method == 'POST':
form = NewUserForm(request.POST)
if form.is_valid():
form.save(commit=True)
return welcome(request)
else:
print('ERROR FORM INVALID')
return render(request, 'users.html', {'form': form})
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
# from appTwo.models import User
from appTwo.forms import NewUserForm
# Create your views here.
# def index(request):
# return HttpResponse("<em>My Second Project</em>")
def welcome(request):
# welcomedict={'welcome_insert':'Go to /users to see the list of user information!'}
return render(request,'welcome.html')
def users(request):
# users_list=User.objects.all()
# user_dict={'users':users_list}
form = NewUserForm()
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
form.save(commit=True)
return welcome(request)
else:
print('ERROR FORM INVALID')
return render(request,"users.html",{'form':form})
|
flexible
|
{
"blob_id": "d5f66d92371838c703abbf80e2b78717cdd4a4fb",
"index": 7140,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\ndef users(request):\n form = NewUserForm()\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n return render(request, 'users.html', {'form': form})\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom appTwo.forms import NewUserForm\n\n\ndef welcome(request):\n return render(request, 'welcome.html')\n\n\ndef users(request):\n form = NewUserForm()\n if request.method == 'POST':\n form = NewUserForm(request.POST)\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n return render(request, 'users.html', {'form': form})\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse\n# from appTwo.models import User\nfrom appTwo.forms import NewUserForm\n# Create your views here.\n\n\n# def index(request):\n# return HttpResponse(\"<em>My Second Project</em>\")\n\ndef welcome(request):\n # welcomedict={'welcome_insert':'Go to /users to see the list of user information!'}\n return render(request,'welcome.html')\n\ndef users(request):\n # users_list=User.objects.all()\n # user_dict={'users':users_list}\n form = NewUserForm()\n\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return welcome(request)\n else:\n print('ERROR FORM INVALID')\n\n return render(request,\"users.html\",{'form':form})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
from ba1g import hamming_distance
from ba2c import profile_most_probable
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i + k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(
motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
profile = form_profile(motifs)
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
'''
Implement GreedyMotifSearch
http://rosalind.info/problems/ba2d/
Given: Integers k and t, followed by a collection of strings Dna.
Return: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.
'''
import pandas as pd
from ba1g import hamming_distance
from ba2c import profile_most_probable
filename = 'rosalind_ba2d.txt'
BASES = ['A', 'C', 'G', 'T']
def greedy_motif_search(dnas, k, t):
# took ~4 min to run on test dataset but seems to be the correct algorithm
# based on pseudocode (and other peoples' submissions)
best_motifs = [dna[:k] for dna in dnas]
best_score = score_motifs(best_motifs)
for i in range(len(dnas[0]) - k + 1):
print(i)
motifs = [dnas[0][i:i+k]]
for j in range(1, t):
motifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))
score = score_motifs(motifs)
if score < best_score:
best_motifs = motifs
best_score = score
return best_motifs
def form_profile(motifs):
profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)
for motif in motifs:
for i, base in enumerate(motif):
profile.loc[base, i] += 1
return profile / len(motifs)
def score_motifs(motifs):
# couldn't figure out what 'score' from pseudocode meant :(
# had to reference someone else's code:
# https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py
profile = form_profile(motifs)
# neat df function generates the consensus string
consensus = ''.join(profile.idxmax())
return sum(hamming_distance(motif, consensus) for motif in motifs)
def main():
with open(filename) as f:
k, t = list(map(int, f.readline().strip().split()))
dnas = [line.strip() for line in f.readlines()]
for motif in greedy_motif_search(dnas, k, t):
print(motif)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "ed7fa6e6f30eb06400cb38128617967a597f6c04",
"index": 2450,
"step-1": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport pandas as pd\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nImplement GreedyMotifSearch\nhttp://rosalind.info/problems/ba2d/\n\nGiven: Integers k and t, followed by a collection of strings Dna.\n\nReturn: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.\n'''\nimport pandas as pd\n\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\n\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\ndef greedy_motif_search(dnas, k, t):\n\t# took ~4 min to run on test dataset but seems to be the correct algorithm\n\t# based on pseudocode (and other peoples' submissions)\n\tbest_motifs = [dna[:k] for dna in dnas]\n\tbest_score = score_motifs(best_motifs)\n\tfor i in range(len(dnas[0]) - k + 1):\n\t\tprint(i)\n\t\tmotifs = [dnas[0][i:i+k]]\n\t\tfor j in range(1, t):\n\t\t\tmotifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))\n\t\tscore = score_motifs(motifs)\n\t\tif score < best_score:\n\t\t\tbest_motifs = motifs\n\t\t\tbest_score = score\n\treturn best_motifs\n\ndef form_profile(motifs):\n\tprofile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n\tfor motif in motifs:\n\t\tfor i, base in enumerate(motif):\n\t\t\tprofile.loc[base, i] += 1\n\treturn profile / len(motifs)\n\ndef score_motifs(motifs):\n\t# couldn't figure out what 'score' from pseudocode meant :(\n\t# had to reference someone else's code:\n\t# https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py\n\tprofile = form_profile(motifs)\n\t# neat df function generates the consensus string\n\tconsensus = ''.join(profile.idxmax())\n\treturn sum(hamming_distance(motif, consensus) for motif in motifs)\n\ndef main():\n\twith open(filename) as f:\n\t\tk, t = list(map(int, f.readline().strip().split()))\n\t\tdnas = [line.strip() for line in f.readlines()]\n\tfor motif in greedy_motif_search(dnas, k, t):\n\t\tprint(motif)\n\nif __name__ == 
'__main__':\n\tmain()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
<|reserved_special_token_0|>
class ParserCreator(object):
def __init__(self):
self.create_main()
self.create_download()
self.create_traces()
self.create_describe()
self.create_spikesort()
self.create_detect()
self.create_auto()
self.create_manual()
self.create_notebook()
@property
def parser(self):
return self._parser
def _add_sub_parser(self, name, desc):
p = self._subparsers.add_parser(name, help=desc, description=desc)
self._add_options(p)
return p
def _add_options(self, parser):
parser.add_argument('--debug', '-d', action='store_true', help=
'activate debug logging mode')
parser.add_argument('--hide-traceback', action='store_true', help=
'hide the traceback for cleaner error messages')
parser.add_argument('--profiler', '-p', action='store_true', help=
'activate the profiler')
parser.add_argument('--line-profiler', '-lp', dest='line_profiler',
action='store_true', help=
'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'
)
parser.add_argument('--ipython', '-i', action='store_true', help=
'launch the script in an interactive IPython console')
parser.add_argument('--pdb', action='store_true', help=
'activate the Python debugger')
def create_main(self):
import phy
desc = sys.modules['phy'].__doc__
self._parser = Parser(description=desc, epilog=_examples,
formatter_class=CustomFormatter)
self._parser.set_defaults(func=None)
self._parser.add_argument('--version', '-v', action='version',
version=phy.__version_git__, help='print the version of phy')
self._add_options(self._parser)
self._subparsers = self._parser.add_subparsers(dest='command',
title='subcommand')
def create_download(self):
desc = 'download a sample dataset'
p = self._add_sub_parser('download', desc)
p.add_argument('file', help='dataset filename')
p.add_argument('--output-dir', '-o', help='output directory')
p.add_argument('--base', default='cortexlab', choices=('cortexlab',
'github'), help='data repository name: `cortexlab` or `github`')
p.set_defaults(func=download)
def create_describe(self):
desc = 'describe a `.kwik` file'
p = self._add_sub_parser('describe', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=describe)
def create_traces(self):
desc = 'show the traces of a raw data file'
p = self._add_sub_parser('traces', desc)
p.add_argument('file', help='path to a `.kwd` or `.dat` file')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.add_argument('--n-channels', '-n', help=
'number of channels in the recording (only required when using a flat binary file)'
)
p.add_argument('--dtype', help=
'NumPy data type (only required when using a flat binary file)',
default='int16')
p.add_argument('--sample-rate', '-s', help=
'sample rate in Hz (only required when using a flat binary file)')
p.set_defaults(func=traces)
def create_spikesort(self):
desc = 'launch the whole spike sorting pipeline on a `.prm` file'
p = self._add_sub_parser('spikesort', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=spikesort)
def create_detect(self):
desc = 'launch the spike detection algorithm on a `.prm` file'
p = self._add_sub_parser('detect', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=detect)
def create_auto(self):
desc = 'launch the automatic clustering algorithm on a `.kwik` file'
p = self._add_sub_parser('cluster-auto', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=cluster_auto)
def create_manual(self):
desc = 'launch the manual clustering GUI on a `.kwik` file'
p = self._add_sub_parser('cluster-manual', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.add_argument('--cluster-ids', '-c', help=
'list of clusters to select initially')
p.add_argument('--no-store', action='store_true', default=False,
help='do not create the store (faster loading time, slower GUI)')
p.set_defaults(func=cluster_manual)
def create_notebook(self):
pass
def parse(self, args):
try:
return self._parser.parse_args(args)
except SystemExit as e:
if e.code != 0:
raise e
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.
RawDescriptionHelpFormatter):
pass
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
<|reserved_special_token_0|>
class ParserCreator(object):
def __init__(self):
self.create_main()
self.create_download()
self.create_traces()
self.create_describe()
self.create_spikesort()
self.create_detect()
self.create_auto()
self.create_manual()
self.create_notebook()
@property
def parser(self):
return self._parser
def _add_sub_parser(self, name, desc):
p = self._subparsers.add_parser(name, help=desc, description=desc)
self._add_options(p)
return p
def _add_options(self, parser):
parser.add_argument('--debug', '-d', action='store_true', help=
'activate debug logging mode')
parser.add_argument('--hide-traceback', action='store_true', help=
'hide the traceback for cleaner error messages')
parser.add_argument('--profiler', '-p', action='store_true', help=
'activate the profiler')
parser.add_argument('--line-profiler', '-lp', dest='line_profiler',
action='store_true', help=
'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'
)
parser.add_argument('--ipython', '-i', action='store_true', help=
'launch the script in an interactive IPython console')
parser.add_argument('--pdb', action='store_true', help=
'activate the Python debugger')
def create_main(self):
import phy
desc = sys.modules['phy'].__doc__
self._parser = Parser(description=desc, epilog=_examples,
formatter_class=CustomFormatter)
self._parser.set_defaults(func=None)
self._parser.add_argument('--version', '-v', action='version',
version=phy.__version_git__, help='print the version of phy')
self._add_options(self._parser)
self._subparsers = self._parser.add_subparsers(dest='command',
title='subcommand')
def create_download(self):
desc = 'download a sample dataset'
p = self._add_sub_parser('download', desc)
p.add_argument('file', help='dataset filename')
p.add_argument('--output-dir', '-o', help='output directory')
p.add_argument('--base', default='cortexlab', choices=('cortexlab',
'github'), help='data repository name: `cortexlab` or `github`')
p.set_defaults(func=download)
def create_describe(self):
desc = 'describe a `.kwik` file'
p = self._add_sub_parser('describe', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=describe)
def create_traces(self):
desc = 'show the traces of a raw data file'
p = self._add_sub_parser('traces', desc)
p.add_argument('file', help='path to a `.kwd` or `.dat` file')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.add_argument('--n-channels', '-n', help=
'number of channels in the recording (only required when using a flat binary file)'
)
p.add_argument('--dtype', help=
'NumPy data type (only required when using a flat binary file)',
default='int16')
p.add_argument('--sample-rate', '-s', help=
'sample rate in Hz (only required when using a flat binary file)')
p.set_defaults(func=traces)
def create_spikesort(self):
desc = 'launch the whole spike sorting pipeline on a `.prm` file'
p = self._add_sub_parser('spikesort', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=spikesort)
def create_detect(self):
desc = 'launch the spike detection algorithm on a `.prm` file'
p = self._add_sub_parser('detect', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=detect)
def create_auto(self):
desc = 'launch the automatic clustering algorithm on a `.kwik` file'
p = self._add_sub_parser('cluster-auto', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=cluster_auto)
def create_manual(self):
desc = 'launch the manual clustering GUI on a `.kwik` file'
p = self._add_sub_parser('cluster-manual', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.add_argument('--cluster-ids', '-c', help=
'list of clusters to select initially')
p.add_argument('--no-store', action='store_true', default=False,
help='do not create the store (faster loading time, slower GUI)')
p.set_defaults(func=cluster_manual)
def create_notebook(self):
pass
def parse(self, args):
try:
return self._parser.parse_args(args)
except SystemExit as e:
if e.code != 0:
raise e
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.
RawDescriptionHelpFormatter):
pass
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
<|reserved_special_token_0|>
class ParserCreator(object):
def __init__(self):
self.create_main()
self.create_download()
self.create_traces()
self.create_describe()
self.create_spikesort()
self.create_detect()
self.create_auto()
self.create_manual()
self.create_notebook()
@property
def parser(self):
return self._parser
def _add_sub_parser(self, name, desc):
p = self._subparsers.add_parser(name, help=desc, description=desc)
self._add_options(p)
return p
def _add_options(self, parser):
parser.add_argument('--debug', '-d', action='store_true', help=
'activate debug logging mode')
parser.add_argument('--hide-traceback', action='store_true', help=
'hide the traceback for cleaner error messages')
parser.add_argument('--profiler', '-p', action='store_true', help=
'activate the profiler')
parser.add_argument('--line-profiler', '-lp', dest='line_profiler',
action='store_true', help=
'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'
)
parser.add_argument('--ipython', '-i', action='store_true', help=
'launch the script in an interactive IPython console')
parser.add_argument('--pdb', action='store_true', help=
'activate the Python debugger')
def create_main(self):
import phy
desc = sys.modules['phy'].__doc__
self._parser = Parser(description=desc, epilog=_examples,
formatter_class=CustomFormatter)
self._parser.set_defaults(func=None)
self._parser.add_argument('--version', '-v', action='version',
version=phy.__version_git__, help='print the version of phy')
self._add_options(self._parser)
self._subparsers = self._parser.add_subparsers(dest='command',
title='subcommand')
def create_download(self):
desc = 'download a sample dataset'
p = self._add_sub_parser('download', desc)
p.add_argument('file', help='dataset filename')
p.add_argument('--output-dir', '-o', help='output directory')
p.add_argument('--base', default='cortexlab', choices=('cortexlab',
'github'), help='data repository name: `cortexlab` or `github`')
p.set_defaults(func=download)
def create_describe(self):
desc = 'describe a `.kwik` file'
p = self._add_sub_parser('describe', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=describe)
def create_traces(self):
desc = 'show the traces of a raw data file'
p = self._add_sub_parser('traces', desc)
p.add_argument('file', help='path to a `.kwd` or `.dat` file')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.add_argument('--n-channels', '-n', help=
'number of channels in the recording (only required when using a flat binary file)'
)
p.add_argument('--dtype', help=
'NumPy data type (only required when using a flat binary file)',
default='int16')
p.add_argument('--sample-rate', '-s', help=
'sample rate in Hz (only required when using a flat binary file)')
p.set_defaults(func=traces)
def create_spikesort(self):
desc = 'launch the whole spike sorting pipeline on a `.prm` file'
p = self._add_sub_parser('spikesort', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=spikesort)
def create_detect(self):
desc = 'launch the spike detection algorithm on a `.prm` file'
p = self._add_sub_parser('detect', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=detect)
def create_auto(self):
desc = 'launch the automatic clustering algorithm on a `.kwik` file'
p = self._add_sub_parser('cluster-auto', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=cluster_auto)
def create_manual(self):
desc = 'launch the manual clustering GUI on a `.kwik` file'
p = self._add_sub_parser('cluster-manual', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.add_argument('--cluster-ids', '-c', help=
'list of clusters to select initially')
p.add_argument('--no-store', action='store_true', default=False,
help='do not create the store (faster loading time, slower GUI)')
p.set_defaults(func=cluster_manual)
def create_notebook(self):
pass
def parse(self, args):
try:
return self._parser.parse_args(args)
except SystemExit as e:
if e.code != 0:
raise e
def _get_kwik_path(args):
kwik_path = args.file
if not op.exists(kwik_path):
raise IOError("The file `{}` doesn't exist.".format(kwik_path))
return kwik_path
def _create_session(args, **kwargs):
from phy.session import Session
kwik_path = _get_kwik_path(args)
session = Session(kwik_path, **kwargs)
return session
def describe(args):
from phy.io.kwik import KwikModel
path = _get_kwik_path(args)
model = KwikModel(path, clustering=args.clustering)
return 'model.describe()', dict(model=model)
def download(args):
from phy import download_sample_data
download_sample_data(args.file, output_dir=args.output_dir, base=args.base)
def traces(args):
from vispy.app import run
from phy.plot.traces import TraceView
from phy.io.h5 import open_h5
from phy.io.traces import read_kwd, read_dat
path = args.file
if path.endswith('.kwd'):
f = open_h5(args.file)
traces = read_kwd(f)
elif path.endswith(('.dat', '.bin')):
if not args.n_channels:
raise ValueError('Please specify `--n-channels`.')
if not args.dtype:
raise ValueError('Please specify `--dtype`.')
if not args.sample_rate:
raise ValueError('Please specify `--sample-rate`.')
n_channels = int(args.n_channels)
dtype = np.dtype(args.dtype)
traces = read_dat(path, dtype=dtype, n_channels=n_channels)
start, end = map(int, args.interval.split(','))
sample_rate = float(args.sample_rate)
start = int(sample_rate * start)
end = int(sample_rate * end)
c = TraceView(keys='interactive')
c.visual.traces = 0.01 * traces[start:end, ...]
c.show()
run()
return None, None
def detect(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=
kwik_path)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
args.file = kwik_path
session = _create_session(args, use_store=False)
return 'session.detect(interval=interval)', dict(session=session,
interval=interval)
def cluster_auto(args):
from phy.utils._misc import _read_python
from phy.session import Session
assert args.file.endswith('.prm')
params = _read_python(args.file)
kwik_path = params['experiment_name'] + '.kwik'
session = Session(kwik_path)
ns = dict(session=session, clustering=args.clustering)
cmd = 'session.cluster(clustering=clustering)'
return cmd, ns
def spikesort(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=
kwik_path)
args.file = kwik_path
session = _create_session(args, use_store=False)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
ns = dict(session=session, interval=interval, n_s_clusters=100)
cmd = 'session.detect(interval=interval); session.cluster();'
return cmd, ns
def cluster_manual(args):
session = _create_session(args, clustering=args.clustering, use_store=
not args.no_store)
cluster_ids = list(map(int, args.cluster_ids.split(','))
) if args.cluster_ids else None
session.model.describe()
from phy.gui import start_qt_app
start_qt_app()
gui = session.show_gui(cluster_ids=cluster_ids, show=False)
print('\nPress `ctrl+h` to see the list of keyboard shortcuts.\n')
return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)
def main(args=None):
p = ParserCreator()
if args is None:
args = sys.argv[1:]
elif isinstance(args, string_types):
args = args.split(' ')
args = p.parse(args)
if args is None:
return
if args.profiler or args.line_profiler:
from phy.utils.testing import _enable_profiler, _profile
prof = _enable_profiler(args.line_profiler)
else:
prof = None
import phy
if args.debug:
phy.debug()
if args.hide_traceback:
def exception_handler(exception_type, exception, traceback):
print('{}: {}'.format(exception_type.__name__, exception))
sys.excepthook = exception_handler
if args.pdb:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=
'Linux', call_pdb=1)
func = args.func
if func is None:
p.parser.print_help()
return
out = func(args)
if not out:
return
cmd, ns = out
if not cmd:
return
requires_qt = ns.pop('requires_qt', False)
requires_vispy = ns.pop('requires_vispy', False)
ns.update(phy=phy, path=args.file)
if 'session' in ns:
ns['model'] = ns['session'].model
if args.ipython:
print('\nStarting IPython...')
from IPython import start_ipython
args_ipy = ['-i', "-c='{}'".format(cmd)]
if requires_qt or requires_vispy:
args_ipy += ['--gui=qt']
start_ipython(args_ipy, user_ns=ns)
else:
if not prof:
exec_(cmd, {}, ns)
else:
_profile(prof, cmd, {}, ns)
if requires_qt:
from phy.gui import run_qt_app
run_qt_app()
elif requires_vispy:
from vispy.app import use_app, run
use_app('pyqt4')
run()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.
RawDescriptionHelpFormatter):
pass
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
_examples = dedent(
"""
examples:
phy -v display the version of phy
phy download hybrid_120sec.dat -o data/
download a sample raw data file in `data/`
phy describe my_file.kwik
display information about a Kwik dataset
phy spikesort my_params.prm
run the whole suite (spike detection and clustering)
phy detect my_params.prm
run spike detection on a parameters file
phy cluster-auto my_file.kwik
run klustakwik on a dataset (after spike detection)
phy cluster-manual my_file.kwik
run the manual clustering GUI
"""
)
class ParserCreator(object):
def __init__(self):
self.create_main()
self.create_download()
self.create_traces()
self.create_describe()
self.create_spikesort()
self.create_detect()
self.create_auto()
self.create_manual()
self.create_notebook()
@property
def parser(self):
return self._parser
def _add_sub_parser(self, name, desc):
p = self._subparsers.add_parser(name, help=desc, description=desc)
self._add_options(p)
return p
def _add_options(self, parser):
parser.add_argument('--debug', '-d', action='store_true', help=
'activate debug logging mode')
parser.add_argument('--hide-traceback', action='store_true', help=
'hide the traceback for cleaner error messages')
parser.add_argument('--profiler', '-p', action='store_true', help=
'activate the profiler')
parser.add_argument('--line-profiler', '-lp', dest='line_profiler',
action='store_true', help=
'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'
)
parser.add_argument('--ipython', '-i', action='store_true', help=
'launch the script in an interactive IPython console')
parser.add_argument('--pdb', action='store_true', help=
'activate the Python debugger')
def create_main(self):
import phy
desc = sys.modules['phy'].__doc__
self._parser = Parser(description=desc, epilog=_examples,
formatter_class=CustomFormatter)
self._parser.set_defaults(func=None)
self._parser.add_argument('--version', '-v', action='version',
version=phy.__version_git__, help='print the version of phy')
self._add_options(self._parser)
self._subparsers = self._parser.add_subparsers(dest='command',
title='subcommand')
def create_download(self):
desc = 'download a sample dataset'
p = self._add_sub_parser('download', desc)
p.add_argument('file', help='dataset filename')
p.add_argument('--output-dir', '-o', help='output directory')
p.add_argument('--base', default='cortexlab', choices=('cortexlab',
'github'), help='data repository name: `cortexlab` or `github`')
p.set_defaults(func=download)
def create_describe(self):
desc = 'describe a `.kwik` file'
p = self._add_sub_parser('describe', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=describe)
def create_traces(self):
desc = 'show the traces of a raw data file'
p = self._add_sub_parser('traces', desc)
p.add_argument('file', help='path to a `.kwd` or `.dat` file')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.add_argument('--n-channels', '-n', help=
'number of channels in the recording (only required when using a flat binary file)'
)
p.add_argument('--dtype', help=
'NumPy data type (only required when using a flat binary file)',
default='int16')
p.add_argument('--sample-rate', '-s', help=
'sample rate in Hz (only required when using a flat binary file)')
p.set_defaults(func=traces)
def create_spikesort(self):
desc = 'launch the whole spike sorting pipeline on a `.prm` file'
p = self._add_sub_parser('spikesort', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=spikesort)
def create_detect(self):
desc = 'launch the spike detection algorithm on a `.prm` file'
p = self._add_sub_parser('detect', desc)
p.add_argument('file', help='path to a `.prm` file')
p.add_argument('--kwik-path', help=
'filename of the `.kwik` file to create (by default, `"experiment_name".kwik`)'
)
p.add_argument('--overwrite', action='store_true', default=False,
help='overwrite the `.kwik` file ')
p.add_argument('--interval', help=
'detection interval in seconds (e.g. `0,10`)')
p.set_defaults(func=detect)
def create_auto(self):
desc = 'launch the automatic clustering algorithm on a `.kwik` file'
p = self._add_sub_parser('cluster-auto', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.set_defaults(func=cluster_auto)
def create_manual(self):
desc = 'launch the manual clustering GUI on a `.kwik` file'
p = self._add_sub_parser('cluster-manual', desc)
p.add_argument('file', help='path to a `.kwik` file')
p.add_argument('--clustering', default='main', help=
'name of the clustering to use')
p.add_argument('--cluster-ids', '-c', help=
'list of clusters to select initially')
p.add_argument('--no-store', action='store_true', default=False,
help='do not create the store (faster loading time, slower GUI)')
p.set_defaults(func=cluster_manual)
def create_notebook(self):
pass
def parse(self, args):
try:
return self._parser.parse_args(args)
except SystemExit as e:
if e.code != 0:
raise e
def _get_kwik_path(args):
kwik_path = args.file
if not op.exists(kwik_path):
raise IOError("The file `{}` doesn't exist.".format(kwik_path))
return kwik_path
def _create_session(args, **kwargs):
from phy.session import Session
kwik_path = _get_kwik_path(args)
session = Session(kwik_path, **kwargs)
return session
def describe(args):
from phy.io.kwik import KwikModel
path = _get_kwik_path(args)
model = KwikModel(path, clustering=args.clustering)
return 'model.describe()', dict(model=model)
def download(args):
from phy import download_sample_data
download_sample_data(args.file, output_dir=args.output_dir, base=args.base)
def traces(args):
from vispy.app import run
from phy.plot.traces import TraceView
from phy.io.h5 import open_h5
from phy.io.traces import read_kwd, read_dat
path = args.file
if path.endswith('.kwd'):
f = open_h5(args.file)
traces = read_kwd(f)
elif path.endswith(('.dat', '.bin')):
if not args.n_channels:
raise ValueError('Please specify `--n-channels`.')
if not args.dtype:
raise ValueError('Please specify `--dtype`.')
if not args.sample_rate:
raise ValueError('Please specify `--sample-rate`.')
n_channels = int(args.n_channels)
dtype = np.dtype(args.dtype)
traces = read_dat(path, dtype=dtype, n_channels=n_channels)
start, end = map(int, args.interval.split(','))
sample_rate = float(args.sample_rate)
start = int(sample_rate * start)
end = int(sample_rate * end)
c = TraceView(keys='interactive')
c.visual.traces = 0.01 * traces[start:end, ...]
c.show()
run()
return None, None
def detect(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=
kwik_path)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
args.file = kwik_path
session = _create_session(args, use_store=False)
return 'session.detect(interval=interval)', dict(session=session,
interval=interval)
def cluster_auto(args):
from phy.utils._misc import _read_python
from phy.session import Session
assert args.file.endswith('.prm')
params = _read_python(args.file)
kwik_path = params['experiment_name'] + '.kwik'
session = Session(kwik_path)
ns = dict(session=session, clustering=args.clustering)
cmd = 'session.cluster(clustering=clustering)'
return cmd, ns
def spikesort(args):
from phy.io import create_kwik
assert args.file.endswith('.prm')
kwik_path = args.kwik_path
kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=
kwik_path)
args.file = kwik_path
session = _create_session(args, use_store=False)
interval = args.interval
if interval is not None:
interval = list(map(float, interval.split(',')))
ns = dict(session=session, interval=interval, n_s_clusters=100)
cmd = 'session.detect(interval=interval); session.cluster();'
return cmd, ns
def cluster_manual(args):
session = _create_session(args, clustering=args.clustering, use_store=
not args.no_store)
cluster_ids = list(map(int, args.cluster_ids.split(','))
) if args.cluster_ids else None
session.model.describe()
from phy.gui import start_qt_app
start_qt_app()
gui = session.show_gui(cluster_ids=cluster_ids, show=False)
print('\nPress `ctrl+h` to see the list of keyboard shortcuts.\n')
return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)
def main(args=None):
p = ParserCreator()
if args is None:
args = sys.argv[1:]
elif isinstance(args, string_types):
args = args.split(' ')
args = p.parse(args)
if args is None:
return
if args.profiler or args.line_profiler:
from phy.utils.testing import _enable_profiler, _profile
prof = _enable_profiler(args.line_profiler)
else:
prof = None
import phy
if args.debug:
phy.debug()
if args.hide_traceback:
def exception_handler(exception_type, exception, traceback):
print('{}: {}'.format(exception_type.__name__, exception))
sys.excepthook = exception_handler
if args.pdb:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=
'Linux', call_pdb=1)
func = args.func
if func is None:
p.parser.print_help()
return
out = func(args)
if not out:
return
cmd, ns = out
if not cmd:
return
requires_qt = ns.pop('requires_qt', False)
requires_vispy = ns.pop('requires_vispy', False)
ns.update(phy=phy, path=args.file)
if 'session' in ns:
ns['model'] = ns['session'].model
if args.ipython:
print('\nStarting IPython...')
from IPython import start_ipython
args_ipy = ['-i', "-c='{}'".format(cmd)]
if requires_qt or requires_vispy:
args_ipy += ['--gui=qt']
start_ipython(args_ipy, user_ns=ns)
else:
if not prof:
exec_(cmd, {}, ns)
else:
_profile(prof, cmd, {}, ns)
if requires_qt:
from phy.gui import run_qt_app
run_qt_app()
elif requires_vispy:
from vispy.app import use_app, run
use_app('pyqt4')
run()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import print_function
"""phy main CLI tool.
Usage:
phy --help
"""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import sys
import os.path as op
import argparse
from textwrap import dedent
import numpy as np
from six import exec_, string_types
#------------------------------------------------------------------------------
# Parser utilities
#------------------------------------------------------------------------------
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
class Parser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write(message + '\n\n')
self.print_help()
sys.exit(2)
_examples = dedent("""
examples:
phy -v display the version of phy
phy download hybrid_120sec.dat -o data/
download a sample raw data file in `data/`
phy describe my_file.kwik
display information about a Kwik dataset
phy spikesort my_params.prm
run the whole suite (spike detection and clustering)
phy detect my_params.prm
run spike detection on a parameters file
phy cluster-auto my_file.kwik
run klustakwik on a dataset (after spike detection)
phy cluster-manual my_file.kwik
run the manual clustering GUI
""")
#------------------------------------------------------------------------------
# Parser creator
#------------------------------------------------------------------------------
class ParserCreator(object):
    """Build the top-level `phy` argument parser and all subcommand parsers."""
    def __init__(self):
        # Register every subcommand on the main parser at construction time.
        self.create_main()
        self.create_download()
        self.create_traces()
        self.create_describe()
        self.create_spikesort()
        self.create_detect()
        self.create_auto()
        self.create_manual()
        self.create_notebook()
    @property
    def parser(self):
        """The top-level `argparse` parser instance."""
        return self._parser
    def _add_sub_parser(self, name, desc):
        """Create and return a subcommand parser with the common options."""
        p = self._subparsers.add_parser(name, help=desc, description=desc)
        self._add_options(p)
        return p
    def _add_options(self, parser):
        """Add the options shared by the main parser and all subcommands."""
        parser.add_argument('--debug', '-d',
                            action='store_true',
                            help='activate debug logging mode')
        parser.add_argument('--hide-traceback',
                            action='store_true',
                            help='hide the traceback for cleaner error '
                                 'messages')
        parser.add_argument('--profiler', '-p',
                            action='store_true',
                            help='activate the profiler')
        parser.add_argument('--line-profiler', '-lp',
                            dest='line_profiler',
                            action='store_true',
                            help='activate the line-profiler -- you '
                                 'need to decorate the functions '
                                 'to profile with `@profile` '
                                 'in the code')
        parser.add_argument('--ipython', '-i', action='store_true',
                            help='launch the script in an interactive '
                                 'IPython console')
        parser.add_argument('--pdb', action='store_true',
                            help='activate the Python debugger')
    def create_main(self):
        """Create the top-level parser (version flag, common options,
        subparser registry)."""
        import phy
        # The module docstring of `phy` serves as the CLI description.
        desc = sys.modules['phy'].__doc__
        self._parser = Parser(description=desc,
                              epilog=_examples,
                              formatter_class=CustomFormatter,
                              )
        # `func=None` means "no subcommand given": main() prints the help.
        self._parser.set_defaults(func=None)
        self._parser.add_argument('--version', '-v',
                                  action='version',
                                  version=phy.__version_git__,
                                  help='print the version of phy')
        self._add_options(self._parser)
        self._subparsers = self._parser.add_subparsers(dest='command',
                                                       title='subcommand',
                                                       )
    def create_download(self):
        """Create the `download` subcommand parser."""
        desc = 'download a sample dataset'
        p = self._add_sub_parser('download', desc)
        p.add_argument('file', help='dataset filename')
        p.add_argument('--output-dir', '-o', help='output directory')
        p.add_argument('--base',
                       default='cortexlab',
                       choices=('cortexlab', 'github'),
                       help='data repository name: `cortexlab` or `github`',
                       )
        p.set_defaults(func=download)
    def create_describe(self):
        """Create the `describe` subcommand parser."""
        desc = 'describe a `.kwik` file'
        p = self._add_sub_parser('describe', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=describe)
    def create_traces(self):
        """Create the `traces` subcommand parser."""
        desc = 'show the traces of a raw data file'
        p = self._add_sub_parser('traces', desc)
        p.add_argument('file', help='path to a `.kwd` or `.dat` file')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.add_argument('--n-channels', '-n',
                       help='number of channels in the recording '
                            '(only required when using a flat binary file)')
        p.add_argument('--dtype',
                       help='NumPy data type '
                            '(only required when using a flat binary file)',
                       default='int16',
                       )
        p.add_argument('--sample-rate', '-s',
                       help='sample rate in Hz '
                            '(only required when using a flat binary file)')
        p.set_defaults(func=traces)
    def create_spikesort(self):
        """Create the `spikesort` subcommand parser."""
        desc = 'launch the whole spike sorting pipeline on a `.prm` file'
        p = self._add_sub_parser('spikesort', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=spikesort)
    def create_detect(self):
        """Create the `detect` subcommand parser."""
        desc = 'launch the spike detection algorithm on a `.prm` file'
        p = self._add_sub_parser('detect', desc)
        p.add_argument('file', help='path to a `.prm` file')
        p.add_argument('--kwik-path', help='filename of the `.kwik` file '
                       'to create (by default, `"experiment_name".kwik`)')
        p.add_argument('--overwrite', action='store_true', default=False,
                       help='overwrite the `.kwik` file ')
        p.add_argument('--interval',
                       help='detection interval in seconds (e.g. `0,10`)')
        p.set_defaults(func=detect)
    def create_auto(self):
        """Create the `cluster-auto` subcommand parser."""
        desc = 'launch the automatic clustering algorithm on a `.kwik` file'
        p = self._add_sub_parser('cluster-auto', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.set_defaults(func=cluster_auto)
    def create_manual(self):
        """Create the `cluster-manual` subcommand parser."""
        desc = 'launch the manual clustering GUI on a `.kwik` file'
        p = self._add_sub_parser('cluster-manual', desc)
        p.add_argument('file', help='path to a `.kwik` file')
        p.add_argument('--clustering', default='main',
                       help='name of the clustering to use')
        p.add_argument('--cluster-ids', '-c',
                       help='list of clusters to select initially')
        p.add_argument('--no-store', action='store_true', default=False,
                       help='do not create the store (faster loading time, '
                            'slower GUI)')
        p.set_defaults(func=cluster_manual)
    def create_notebook(self):
        # TODO
        pass
    def parse(self, args):
        """Parse the argument list; return None when argparse exits cleanly
        (e.g. after `--help` or `--version`)."""
        try:
            return self._parser.parse_args(args)
        except SystemExit as e:
            # Re-raise only on error exits; a zero exit (help/version) is
            # swallowed so main() can simply return.
            if e.code != 0:
                raise e
#------------------------------------------------------------------------------
# Subcommand functions
#------------------------------------------------------------------------------
def _get_kwik_path(args):
kwik_path = args.file
if not op.exists(kwik_path):
raise IOError("The file `{}` doesn't exist.".format(kwik_path))
return kwik_path
def _create_session(args, **kwargs):
    """Open a `Session` on the `.kwik` file named in the CLI arguments."""
    from phy.session import Session
    return Session(_get_kwik_path(args), **kwargs)
def describe(args):
    """Build a Kwik model; return the command string and its namespace."""
    from phy.io.kwik import KwikModel
    model = KwikModel(_get_kwik_path(args), clustering=args.clustering)
    return 'model.describe()', dict(model=model)
def download(args):
    """Download a sample data file into the requested output directory."""
    from phy import download_sample_data
    download_sample_data(args.file,
                         base=args.base,
                         output_dir=args.output_dir,
                         )
def traces(args):
    """Display the traces of a raw data file in a VisPy window.

    Supports HDF5 `.kwd` files and flat binary `.dat`/`.bin` files (the
    latter require `--n-channels`, `--dtype`, and `--sample-rate`).
    Both formats require `--interval` and `--sample-rate` to slice the data.

    Raises ValueError with an explicit message on missing options or an
    unsupported file extension (previously these crashed with
    AttributeError/TypeError/NameError).
    """
    from vispy.app import run
    from phy.plot.traces import TraceView
    from phy.io.h5 import open_h5
    from phy.io.traces import read_kwd, read_dat
    path = args.file
    if path.endswith('.kwd'):
        f = open_h5(args.file)
        traces = read_kwd(f)
    elif path.endswith(('.dat', '.bin')):
        if not args.n_channels:
            raise ValueError("Please specify `--n-channels`.")
        if not args.dtype:
            raise ValueError("Please specify `--dtype`.")
        if not args.sample_rate:
            raise ValueError("Please specify `--sample-rate`.")
        n_channels = int(args.n_channels)
        dtype = np.dtype(args.dtype)
        traces = read_dat(path, dtype=dtype, n_channels=n_channels)
    else:
        # Previously fell through and crashed with NameError on `traces`.
        raise ValueError("Unsupported file extension: `{}`.".format(path))
    # Both branches need an interval and a sample rate to slice the traces;
    # the `.kwd` branch previously crashed on None values here.
    if not args.interval:
        raise ValueError("Please specify `--interval`.")
    if not args.sample_rate:
        raise ValueError("Please specify `--sample-rate`.")
    start, end = map(int, args.interval.split(','))
    sample_rate = float(args.sample_rate)
    # Convert the interval from seconds to sample indices.
    start = int(sample_rate * start)
    end = int(sample_rate * end)
    c = TraceView(keys='interactive')
    c.visual.traces = .01 * traces[start:end, ...]
    c.show()
    run()
    return None, None
def detect(args):
    """Run spike detection on a `.prm` file; return the command and namespace."""
    from phy.io import create_kwik
    assert args.file.endswith('.prm')
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=args.kwik_path)
    interval = (None if args.interval is None
                else [float(x) for x in args.interval.split(',')])
    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)
    return ('session.detect(interval=interval)',
            dict(session=session, interval=interval))
def cluster_auto(args):
    """Run automatic clustering; return the command string and its namespace."""
    from phy.utils._misc import _read_python
    from phy.session import Session
    assert args.file.endswith('.prm')
    # The .prm file names the experiment; the .kwik file is derived from it.
    experiment = _read_python(args.file)['experiment_name']
    session = Session(experiment + '.kwik')
    return ('session.cluster(clustering=clustering)',
            dict(session=session,
                 clustering=args.clustering,
                 ))
def spikesort(args):
    """Run the full pipeline (detection then clustering) on a `.prm` file."""
    from phy.io import create_kwik
    assert args.file.endswith('.prm')
    kwik_path = create_kwik(args.file,
                            overwrite=args.overwrite,
                            kwik_path=args.kwik_path,
                            )
    # Create the session with the newly-created .kwik file.
    args.file = kwik_path
    session = _create_session(args, use_store=False)
    interval = (None if args.interval is None
                else [float(x) for x in args.interval.split(',')])
    ns = dict(session=session,
              interval=interval,
              n_s_clusters=100,  # TODO: better handling of KK parameters
              )
    return ('session.detect(interval=interval); session.cluster();', ns)
def cluster_manual(args):
    """Open the manual clustering GUI; return the command and namespace."""
    session = _create_session(args,
                              clustering=args.clustering,
                              use_store=not(args.no_store),
                              )
    if args.cluster_ids:
        cluster_ids = [int(c) for c in args.cluster_ids.split(',')]
    else:
        cluster_ids = None
    session.model.describe()
    from phy.gui import start_qt_app
    start_qt_app()
    # The GUI is created hidden; the returned command shows it.
    gui = session.show_gui(cluster_ids=cluster_ids, show=False)
    print("\nPress `ctrl+h` to see the list of keyboard shortcuts.\n")
    return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)
#------------------------------------------------------------------------------
# Main functions
#------------------------------------------------------------------------------
def main(args=None):
    """Entry point of the `phy` CLI.

    Parses the arguments, dispatches to the subcommand function, then
    executes the command string it returns (optionally inside IPython,
    a profiler, or a Qt/VisPy event loop).
    """
    p = ParserCreator()
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, string_types):
        # Allow passing the whole command line as a single string.
        args = args.split(' ')
    args = p.parse(args)
    # `parse()` returns None after a clean argparse exit (--help/--version).
    if args is None:
        return
    if args.profiler or args.line_profiler:
        from phy.utils.testing import _enable_profiler, _profile
        prof = _enable_profiler(args.line_profiler)
    else:
        prof = None
    import phy
    if args.debug:
        phy.debug()
    # Hide the traceback.
    if args.hide_traceback:
        def exception_handler(exception_type, exception, traceback):
            print("{}: {}".format(exception_type.__name__, exception))
        sys.excepthook = exception_handler
    # Activate IPython debugger.
    if args.pdb:
        from IPython.core import ultratb
        sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                             color_scheme='Linux',
                                             call_pdb=1,
                                             )
    func = args.func
    # No subcommand given: show the help and stop.
    if func is None:
        p.parser.print_help()
        return
    # Subcommand functions return (command_string, namespace) or nothing.
    out = func(args)
    if not out:
        return
    cmd, ns = out
    if not cmd:
        return
    requires_qt = ns.pop('requires_qt', False)
    requires_vispy = ns.pop('requires_vispy', False)
    # Default variables in namespace.
    ns.update(phy=phy, path=args.file)
    if 'session' in ns:
        ns['model'] = ns['session'].model
    # Interactive mode with IPython.
    if args.ipython:
        print("\nStarting IPython...")
        from IPython import start_ipython
        args_ipy = ["-i", "-c='{}'".format(cmd)]
        if requires_qt or requires_vispy:
            # Activate Qt event loop integration with Qt.
            args_ipy += ["--gui=qt"]
        start_ipython(args_ipy, user_ns=ns)
    else:
        # Non-interactive: execute the command, profiled when requested.
        if not prof:
            exec_(cmd, {}, ns)
        else:
            _profile(prof, cmd, {}, ns)
    if requires_qt:
        # Launch the Qt app.
        from phy.gui import run_qt_app
        run_qt_app()
    elif requires_vispy:
        # Launch the VisPy Qt app.
        from vispy.app import use_app, run
        use_app('pyqt4')
        run()
#------------------------------------------------------------------------------
# Entry point
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the phy CLI when this module is executed as a script.
    main()
|
flexible
|
{
"blob_id": "539523f177e2c3c0e1fb0226d1fcd65463b68a0e",
"index": 6576,
"step-1": "<mask token>\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = 
self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n 
title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n<mask token>\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n 
title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from 
phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file, output_dir=args.output_dir, base=args.base)\n\n\ndef traces(args):\n from vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n path = args.file\n if path.endswith('.kwd'):\n f = open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError('Please specify `--n-channels`.')\n if not args.dtype:\n raise ValueError('Please specify `--dtype`.')\n if not args.sample_rate:\n raise ValueError('Please specify `--sample-rate`.')\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n c = TraceView(keys='interactive')\n c.visual.traces = 0.01 * traces[start:end, ...]\n c.show()\n run()\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return 'session.detect(interval=interval)', dict(session=session,\n interval=interval)\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from phy.session import Session\n assert args.file.endswith('.prm')\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n ns = 
dict(session=session, clustering=args.clustering)\n cmd = 'session.cluster(clustering=clustering)'\n return cmd, ns\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n ns = dict(session=session, interval=interval, n_s_clusters=100)\n cmd = 'session.detect(interval=interval); session.cluster();'\n return cmd, ns\n\n\ndef cluster_manual(args):\n session = _create_session(args, clustering=args.clustering, use_store=\n not args.no_store)\n cluster_ids = list(map(int, args.cluster_ids.split(','))\n ) if args.cluster_ids else None\n session.model.describe()\n from phy.gui import start_qt_app\n start_qt_app()\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print('\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n')\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n import phy\n if args.debug:\n phy.debug()\n if args.hide_traceback:\n\n def exception_handler(exception_type, exception, traceback):\n print('{}: {}'.format(exception_type.__name__, exception))\n sys.excepthook = exception_handler\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=\n 'Linux', call_pdb=1)\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n out = func(args)\n if 
not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n if args.ipython:\n print('\\nStarting IPython...')\n from IPython import start_ipython\n args_ipy = ['-i', \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n args_ipy += ['--gui=qt']\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n _profile(prof, cmd, {}, ns)\n if requires_qt:\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n from vispy.app import use_app, run\n use_app('pyqt4')\n run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\n\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.\n RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n_examples = dedent(\n \"\"\"\n\nexamples:\n phy -v display the version of phy\n phy download hybrid_120sec.dat -o data/\n download a sample raw data file in `data/`\n phy describe my_file.kwik\n display information about a Kwik dataset\n phy spikesort my_params.prm\n run the whole suite (spike detection and clustering)\n phy detect my_params.prm\n run spike detection on a parameters file\n phy cluster-auto my_file.kwik\n run klustakwik on a dataset (after spike detection)\n phy cluster-manual my_file.kwik\n run the manual clustering GUI\n\n\"\"\"\n )\n\n\nclass ParserCreator(object):\n\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d', action='store_true', help=\n 'activate debug logging mode')\n parser.add_argument('--hide-traceback', action='store_true', help=\n 'hide the traceback for cleaner error messages')\n parser.add_argument('--profiler', '-p', action='store_true', help=\n 'activate the profiler')\n parser.add_argument('--line-profiler', '-lp', dest='line_profiler',\n action='store_true', help=\n 'activate the line-profiler -- you need to decorate the functions to profile with `@profile` in the code'\n )\n parser.add_argument('--ipython', '-i', action='store_true', help=\n 'launch the script in an interactive IPython 
console')\n parser.add_argument('--pdb', action='store_true', help=\n 'activate the Python debugger')\n\n def create_main(self):\n import phy\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc, epilog=_examples,\n formatter_class=CustomFormatter)\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v', action='version',\n version=phy.__version_git__, help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand')\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base', default='cortexlab', choices=('cortexlab',\n 'github'), help='data repository name: `cortexlab` or `github`')\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.add_argument('--n-channels', '-n', help=\n 'number of channels in the recording (only required when using a flat binary file)'\n )\n p.add_argument('--dtype', help=\n 'NumPy data type (only required when using a flat binary file)',\n default='int16')\n p.add_argument('--sample-rate', '-s', help=\n 'sample rate in Hz (only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help=\n 'filename of the `.kwik` file to create (by default, `\"experiment_name\".kwik`)'\n )\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval', help=\n 'detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main', help=\n 'name of the clustering to use')\n p.add_argument('--cluster-ids', '-c', help=\n 'list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file, output_dir=args.output_dir, base=args.base)\n\n\ndef traces(args):\n from vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n path = args.file\n if path.endswith('.kwd'):\n f = 
open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError('Please specify `--n-channels`.')\n if not args.dtype:\n raise ValueError('Please specify `--dtype`.')\n if not args.sample_rate:\n raise ValueError('Please specify `--sample-rate`.')\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n c = TraceView(keys='interactive')\n c.visual.traces = 0.01 * traces[start:end, ...]\n c.show()\n run()\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return 'session.detect(interval=interval)', dict(session=session,\n interval=interval)\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from phy.session import Session\n assert args.file.endswith('.prm')\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n ns = dict(session=session, clustering=args.clustering)\n cmd = 'session.cluster(clustering=clustering)'\n return cmd, ns\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file, overwrite=args.overwrite, kwik_path=\n kwik_path)\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n ns = dict(session=session, 
interval=interval, n_s_clusters=100)\n cmd = 'session.detect(interval=interval); session.cluster();'\n return cmd, ns\n\n\ndef cluster_manual(args):\n session = _create_session(args, clustering=args.clustering, use_store=\n not args.no_store)\n cluster_ids = list(map(int, args.cluster_ids.split(','))\n ) if args.cluster_ids else None\n session.model.describe()\n from phy.gui import start_qt_app\n start_qt_app()\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print('\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n')\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n import phy\n if args.debug:\n phy.debug()\n if args.hide_traceback:\n\n def exception_handler(exception_type, exception, traceback):\n print('{}: {}'.format(exception_type.__name__, exception))\n sys.excepthook = exception_handler\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = ultratb.FormattedTB(mode='Verbose', color_scheme=\n 'Linux', call_pdb=1)\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n out = func(args)\n if not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n if args.ipython:\n print('\\nStarting IPython...')\n from IPython import start_ipython\n args_ipy = ['-i', \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n args_ipy += ['--gui=qt']\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n 
_profile(prof, cmd, {}, ns)\n if requires_qt:\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n from vispy.app import use_app, run\n use_app('pyqt4')\n run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n\"\"\"phy main CLI tool.\n\nUsage:\n\n phy --help\n\n\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport sys\nimport os.path as op\nimport argparse\nfrom textwrap import dedent\n\nimport numpy as np\nfrom six import exec_, string_types\n\n\n#------------------------------------------------------------------------------\n# Parser utilities\n#------------------------------------------------------------------------------\n\nclass CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\nclass Parser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write(message + '\\n\\n')\n self.print_help()\n sys.exit(2)\n\n\n_examples = dedent(\"\"\"\n\nexamples:\n phy -v display the version of phy\n phy download hybrid_120sec.dat -o data/\n download a sample raw data file in `data/`\n phy describe my_file.kwik\n display information about a Kwik dataset\n phy spikesort my_params.prm\n run the whole suite (spike detection and clustering)\n phy detect my_params.prm\n run spike detection on a parameters file\n phy cluster-auto my_file.kwik\n run klustakwik on a dataset (after spike detection)\n phy cluster-manual my_file.kwik\n run the manual clustering GUI\n\n\"\"\")\n\n\n#------------------------------------------------------------------------------\n# Parser creator\n#------------------------------------------------------------------------------\n\nclass ParserCreator(object):\n def __init__(self):\n self.create_main()\n self.create_download()\n self.create_traces()\n self.create_describe()\n self.create_spikesort()\n self.create_detect()\n self.create_auto()\n self.create_manual()\n self.create_notebook()\n\n @property\n def parser(self):\n return self._parser\n\n def _add_sub_parser(self, name, 
desc):\n p = self._subparsers.add_parser(name, help=desc, description=desc)\n self._add_options(p)\n return p\n\n def _add_options(self, parser):\n parser.add_argument('--debug', '-d',\n action='store_true',\n help='activate debug logging mode')\n\n parser.add_argument('--hide-traceback',\n action='store_true',\n help='hide the traceback for cleaner error '\n 'messages')\n\n parser.add_argument('--profiler', '-p',\n action='store_true',\n help='activate the profiler')\n\n parser.add_argument('--line-profiler', '-lp',\n dest='line_profiler',\n action='store_true',\n help='activate the line-profiler -- you '\n 'need to decorate the functions '\n 'to profile with `@profile` '\n 'in the code')\n\n parser.add_argument('--ipython', '-i', action='store_true',\n help='launch the script in an interactive '\n 'IPython console')\n\n parser.add_argument('--pdb', action='store_true',\n help='activate the Python debugger')\n\n def create_main(self):\n import phy\n\n desc = sys.modules['phy'].__doc__\n self._parser = Parser(description=desc,\n epilog=_examples,\n formatter_class=CustomFormatter,\n )\n self._parser.set_defaults(func=None)\n self._parser.add_argument('--version', '-v',\n action='version',\n version=phy.__version_git__,\n help='print the version of phy')\n self._add_options(self._parser)\n self._subparsers = self._parser.add_subparsers(dest='command',\n title='subcommand',\n )\n\n def create_download(self):\n desc = 'download a sample dataset'\n p = self._add_sub_parser('download', desc)\n p.add_argument('file', help='dataset filename')\n p.add_argument('--output-dir', '-o', help='output directory')\n p.add_argument('--base',\n default='cortexlab',\n choices=('cortexlab', 'github'),\n help='data repository name: `cortexlab` or `github`',\n )\n p.set_defaults(func=download)\n\n def create_describe(self):\n desc = 'describe a `.kwik` file'\n p = self._add_sub_parser('describe', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n 
p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.set_defaults(func=describe)\n\n def create_traces(self):\n desc = 'show the traces of a raw data file'\n p = self._add_sub_parser('traces', desc)\n p.add_argument('file', help='path to a `.kwd` or `.dat` file')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. `0,10`)')\n p.add_argument('--n-channels', '-n',\n help='number of channels in the recording '\n '(only required when using a flat binary file)')\n p.add_argument('--dtype',\n help='NumPy data type '\n '(only required when using a flat binary file)',\n default='int16',\n )\n p.add_argument('--sample-rate', '-s',\n help='sample rate in Hz '\n '(only required when using a flat binary file)')\n p.set_defaults(func=traces)\n\n def create_spikesort(self):\n desc = 'launch the whole spike sorting pipeline on a `.prm` file'\n p = self._add_sub_parser('spikesort', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help='filename of the `.kwik` file '\n 'to create (by default, `\"experiment_name\".kwik`)')\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. `0,10`)')\n p.set_defaults(func=spikesort)\n\n def create_detect(self):\n desc = 'launch the spike detection algorithm on a `.prm` file'\n p = self._add_sub_parser('detect', desc)\n p.add_argument('file', help='path to a `.prm` file')\n p.add_argument('--kwik-path', help='filename of the `.kwik` file '\n 'to create (by default, `\"experiment_name\".kwik`)')\n p.add_argument('--overwrite', action='store_true', default=False,\n help='overwrite the `.kwik` file ')\n p.add_argument('--interval',\n help='detection interval in seconds (e.g. 
`0,10`)')\n p.set_defaults(func=detect)\n\n def create_auto(self):\n desc = 'launch the automatic clustering algorithm on a `.kwik` file'\n p = self._add_sub_parser('cluster-auto', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.set_defaults(func=cluster_auto)\n\n def create_manual(self):\n desc = 'launch the manual clustering GUI on a `.kwik` file'\n p = self._add_sub_parser('cluster-manual', desc)\n p.add_argument('file', help='path to a `.kwik` file')\n p.add_argument('--clustering', default='main',\n help='name of the clustering to use')\n p.add_argument('--cluster-ids', '-c',\n help='list of clusters to select initially')\n p.add_argument('--no-store', action='store_true', default=False,\n help='do not create the store (faster loading time, '\n 'slower GUI)')\n p.set_defaults(func=cluster_manual)\n\n def create_notebook(self):\n # TODO\n pass\n\n def parse(self, args):\n try:\n return self._parser.parse_args(args)\n except SystemExit as e:\n if e.code != 0:\n raise e\n\n\n#------------------------------------------------------------------------------\n# Subcommand functions\n#------------------------------------------------------------------------------\n\ndef _get_kwik_path(args):\n kwik_path = args.file\n\n if not op.exists(kwik_path):\n raise IOError(\"The file `{}` doesn't exist.\".format(kwik_path))\n\n return kwik_path\n\n\ndef _create_session(args, **kwargs):\n from phy.session import Session\n kwik_path = _get_kwik_path(args)\n session = Session(kwik_path, **kwargs)\n return session\n\n\ndef describe(args):\n from phy.io.kwik import KwikModel\n path = _get_kwik_path(args)\n model = KwikModel(path, clustering=args.clustering)\n return 'model.describe()', dict(model=model)\n\n\ndef download(args):\n from phy import download_sample_data\n download_sample_data(args.file,\n output_dir=args.output_dir,\n base=args.base,\n )\n\n\ndef traces(args):\n from 
vispy.app import run\n from phy.plot.traces import TraceView\n from phy.io.h5 import open_h5\n from phy.io.traces import read_kwd, read_dat\n\n path = args.file\n if path.endswith('.kwd'):\n f = open_h5(args.file)\n traces = read_kwd(f)\n elif path.endswith(('.dat', '.bin')):\n if not args.n_channels:\n raise ValueError(\"Please specify `--n-channels`.\")\n if not args.dtype:\n raise ValueError(\"Please specify `--dtype`.\")\n if not args.sample_rate:\n raise ValueError(\"Please specify `--sample-rate`.\")\n n_channels = int(args.n_channels)\n dtype = np.dtype(args.dtype)\n traces = read_dat(path, dtype=dtype, n_channels=n_channels)\n\n start, end = map(int, args.interval.split(','))\n sample_rate = float(args.sample_rate)\n start = int(sample_rate * start)\n end = int(sample_rate * end)\n\n c = TraceView(keys='interactive')\n c.visual.traces = .01 * traces[start:end, ...]\n c.show()\n run()\n\n return None, None\n\n\ndef detect(args):\n from phy.io import create_kwik\n\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n kwik_path = create_kwik(args.file,\n overwrite=args.overwrite,\n kwik_path=kwik_path)\n\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n\n # Create the session with the newly-created .kwik file.\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n return ('session.detect(interval=interval)',\n dict(session=session, interval=interval))\n\n\ndef cluster_auto(args):\n from phy.utils._misc import _read_python\n from phy.session import Session\n\n assert args.file.endswith('.prm')\n\n params = _read_python(args.file)\n kwik_path = params['experiment_name'] + '.kwik'\n session = Session(kwik_path)\n\n ns = dict(session=session,\n clustering=args.clustering,\n )\n cmd = ('session.cluster(clustering=clustering)')\n return (cmd, ns)\n\n\ndef spikesort(args):\n from phy.io import create_kwik\n\n assert args.file.endswith('.prm')\n kwik_path = args.kwik_path\n 
kwik_path = create_kwik(args.file,\n overwrite=args.overwrite,\n kwik_path=kwik_path,\n )\n # Create the session with the newly-created .kwik file.\n args.file = kwik_path\n session = _create_session(args, use_store=False)\n\n interval = args.interval\n if interval is not None:\n interval = list(map(float, interval.split(',')))\n\n ns = dict(session=session,\n interval=interval,\n n_s_clusters=100, # TODO: better handling of KK parameters\n )\n cmd = ('session.detect(interval=interval); session.cluster();')\n return (cmd, ns)\n\n\ndef cluster_manual(args):\n session = _create_session(args,\n clustering=args.clustering,\n use_store=not(args.no_store),\n )\n cluster_ids = (list(map(int, args.cluster_ids.split(',')))\n if args.cluster_ids else None)\n\n session.model.describe()\n\n from phy.gui import start_qt_app\n start_qt_app()\n\n gui = session.show_gui(cluster_ids=cluster_ids, show=False)\n print(\"\\nPress `ctrl+h` to see the list of keyboard shortcuts.\\n\")\n return 'gui.show()', dict(session=session, gui=gui, requires_qt=True)\n\n\n#------------------------------------------------------------------------------\n# Main functions\n#------------------------------------------------------------------------------\n\ndef main(args=None):\n p = ParserCreator()\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, string_types):\n args = args.split(' ')\n args = p.parse(args)\n if args is None:\n return\n\n if args.profiler or args.line_profiler:\n from phy.utils.testing import _enable_profiler, _profile\n prof = _enable_profiler(args.line_profiler)\n else:\n prof = None\n\n import phy\n if args.debug:\n phy.debug()\n\n # Hide the traceback.\n if args.hide_traceback:\n def exception_handler(exception_type, exception, traceback):\n print(\"{}: {}\".format(exception_type.__name__, exception))\n\n sys.excepthook = exception_handler\n\n # Activate IPython debugger.\n if args.pdb:\n from IPython.core import ultratb\n sys.excepthook = 
ultratb.FormattedTB(mode='Verbose',\n color_scheme='Linux',\n call_pdb=1,\n )\n\n func = args.func\n if func is None:\n p.parser.print_help()\n return\n\n out = func(args)\n if not out:\n return\n cmd, ns = out\n if not cmd:\n return\n requires_qt = ns.pop('requires_qt', False)\n requires_vispy = ns.pop('requires_vispy', False)\n\n # Default variables in namespace.\n ns.update(phy=phy, path=args.file)\n if 'session' in ns:\n ns['model'] = ns['session'].model\n\n # Interactive mode with IPython.\n if args.ipython:\n print(\"\\nStarting IPython...\")\n from IPython import start_ipython\n args_ipy = [\"-i\", \"-c='{}'\".format(cmd)]\n if requires_qt or requires_vispy:\n # Activate Qt event loop integration with Qt.\n args_ipy += [\"--gui=qt\"]\n start_ipython(args_ipy, user_ns=ns)\n else:\n if not prof:\n exec_(cmd, {}, ns)\n else:\n _profile(prof, cmd, {}, ns)\n\n if requires_qt:\n # Launch the Qt app.\n from phy.gui import run_qt_app\n run_qt_app()\n elif requires_vispy:\n # Launch the VisPy Qt app.\n from vispy.app import use_app, run\n use_app('pyqt4')\n run()\n\n\n#------------------------------------------------------------------------------\n# Entry point\n#------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
17,
18,
29,
30,
32
]
}
|
[
17,
18,
29,
30,
32
] |
# -*- coding: utf-8 -*-
"""
-----------------------------------------
IDEA Name : PyCharm
Project Name : HelloWorld
-----------------------------------------
File Name : task_worker
Description :
Author : Edwin
Date : 2018/1/4 23:38
-----------------------------------------
Changer : Edwin
Date : 2018/1/4 23:38
Description :
-----------------------------------------
"""
__author__ = 'Edwin'
import queue
import time
from multiprocessing.managers import BaseManager
# Create a QueueManager matching the one in task_master.py:
class QueueManager(BaseManager):
    """Client-side manager; only registers names of queues hosted by the master."""
    pass
def start_request():
    """Connect to the task master, pull tasks, and push back results.

    Registers the two remote queues by name only (they live on the machine
    running ``task_master.py``), connects to 127.0.0.1:5000, then processes
    up to 10 tasks: each task is an integer ``n`` and the reported result is
    the string ``'n * n = n*n'``.
    """
    # Only register the queue names here; the real queues live on the master.
    QueueManager.register('get_task_queue')
    QueueManager.register('get_result_queue')

    # Address of the machine running task_master.py.
    server_add = '127.0.0.1'
    print('Connect to server %s...' % server_add)

    # Port and authkey must match task_master.py exactly.
    manager = QueueManager(address=(server_add, 5000), authkey=b'abc')
    # Connect over the network.
    manager.connect()

    # Obtain proxies for the remote queues.
    task = manager.get_task_queue()
    result = manager.get_result_queue()

    # Pull tasks from the task queue and write answers to the result queue.
    for i in range(10):
        try:
            n = task.get(timeout=1)
            print('run task %d * %d...' % (n, n))
            # Bug fix: square the task value n itself. The original computed
            # (n + 1) * (n + 1), so the stored result did not correspond to
            # the task announced by the print above.
            r = '%d * %d = %d' % (n, n, n * n)
            time.sleep(5)
            result.put(r)
        except queue.Empty:
            print('task queue is empty!')
    print('worker exit..')


if __name__ == '__main__':
    start_request()
|
normal
|
{
"blob_id": "be1bfa3e366d715d32613284924cf79abde06d41",
"index": 582,
"step-1": "<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-3": "<mask token>\n__author__ = 'Edwin'\n<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-4": "<mask token>\n__author__ = 'Edwin'\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------\n IDEA Name : PyCharm \n Project Name : HelloWorld\n-----------------------------------------\n File Name : task_worker\n Description :\n Author : Edwin\n Date : 2018/1/4 23:38\n-----------------------------------------\n Changer : Edwin\n Date : 2018/1/4 23:38\n Description : \n-----------------------------------------\n\"\"\"\n__author__ = 'Edwin'\n\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\n# 创建类似的QueueManager:\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n # 由于这个QueueManager只从网络上获取Queue,所以注册时只提供名字:\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n\n # 连接到服务器,也就是运行task_master.py的机器:\n server_add = '127.0.0.1'\n\n print('Connect to server %s...' % server_add)\n # 端口和验证码注意保持与task_master.py设置的完全一致:\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n # 从网络连接:\n manager.connect()\n # 获取Queue的对象:\n\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n\n # 从task队列取任务,并把结果写入result队列:\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n # 处理结果\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,
Union, overload)
from pccm.stubs import EnumClassValue, EnumValue
from cumm.tensorview import Tensor
class ConvMainUnitTest:
    """Auto-generated stub for a native implicit-GEMM convolution binding.

    Only the Python signature is declared here (the body is ``...``); the
    implementation presumably lives in compiled PCCM/cumm code — TODO confirm
    against the generating project.
    """

    @staticmethod
    def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None:
        """Launch an implicit-GEMM convolution (signature stub only).

        NOTE(review): parameter semantics below are inferred from the names
        only — confirm against the native binding before relying on them.

        Args:
            input: input feature tensor.
            weight: convolution filter tensor.
            output: output tensor, presumably written in place.
            padding: per-spatial-dimension padding (assumed length ``ndim``).
            stride: per-spatial-dimension stride.
            dilation: per-spatial-dimension dilation.
            ndim: number of spatial dimensions.
            iter_algo_: iterator algorithm selector (enum value).
            op_type_: convolution op type selector (enum value).
            i_ltype_: input layout type selector.
            w_ltype_: weight layout type selector.
            o_ltype_: output layout type selector.
            ts: tile shape triple.
            wts: warp tile shape triple.
            num_stage: pipeline stage count.
            dacc: accumulator dtype selector.
            dcomp: compute dtype selector.
            algo: algorithm name.
            tensorop: tensor-op shape description.
            i_interleave: input interleave factor (default 1).
            w_interleave: weight interleave factor (default 1).
            o_interleave: output interleave factor (default 1).
            alpha: output scale factor (default 1).
            beta: existing-output blend factor (default 0).
            split_k_slices: split-K slice count (default 1).
            workspace: scratch tensor (empty by default).
            mask_sparse: enable the sparse-mask path.
            increment_k_first: iteration-order flag for the sparse path.
            mask: sparse mask tensor.
            mask_argsort: argsort of the mask.
            indices: gather indices tensor.
            mask_output: output mask tensor.
        """
        ...
|
normal
|
{
"blob_id": "a6f3c51d4115a6e0d6f01aa75bf5e6e367840d43",
"index": 914,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConvMainUnitTest:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"step-4": "from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, overload\nfrom pccm.stubs import EnumClassValue, EnumValue\nfrom cumm.tensorview import Tensor\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"step-5": "from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,\n Union, overload)\n\nfrom pccm.stubs import EnumClassValue, EnumValue\n\nfrom cumm.tensorview import Tensor\n\nclass ConvMainUnitTest:\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None: \n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import click
import csv
import sqlite3
from sqlite3.dbapi2 import Connection
import requests
import mimetypes
from urllib.parse import urljoin, urlparse
from lxml.html.soupparser import fromstring
from lxml import etree
from lxml.etree import tostring
from analysis import lmdict, tone_count_with_negation_check
from parser import parse_text
# Sentiment-analyze every stored report URL that has not been processed yet.
# (No docstring on purpose: click would surface it as --help text.)
@click.command()
@click.option('-s','--batch-size', 'batch_size', default=50)
def analyze(batch_size):
    db = db_connect()
    db_ensure_init(db)
    cursor = db.execute("SELECT id, url FROM reports WHERE is_analyzed = 0")
    while True:
        rows = cursor.fetchmany(batch_size)
        if not rows:
            break
        pending = []
        for report_id, report_url in rows:
            print("Analyzing: " + report_url)
            page = requests.get(report_url)
            text = parse_text(page.text)
            print(text[0:400] + '\n[CLIPPED]')
            # counts: (word_count, pos_count, neg_count, pos_words, neg_words)
            # — ordering matches how db_update consumes them below.
            counts = tone_count_with_negation_check(lmdict, text)
            positive = counts[1] > counts[2]
            # Tuple order must match the placeholders in db_update.
            pending.append((
                True,
                positive,
                counts[0],
                counts[1],
                counts[2],
                " ".join(counts[3]),
                " ".join(counts[4]),
                report_id,
            ))
        db_update(db, pending)
@click.command()
@click.argument('start', nargs=1)
@click.argument('end', nargs=1)
@click.option('-s','--batch-size', 'batch_size', default=50)
def fetch_report_urls(start, end, batch_size):
    """Fetches and stores the 10-K report URLs"""
    db = db_connect()
    db_ensure_init(db)
    with open('log.csv', 'w', newline='') as log:
        logwriter = csv.writer(log)
        # Fix: the year bounds come from the command line, so bind them as
        # query parameters instead of splicing them into the SQL with
        # str.format. int() both validates the input and keeps the numeric
        # comparison against the CAST(... as INT) expression correct.
        cmd = db.execute("""
            SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path
            FROM "index" ix
            LEFT JOIN reports r ON ix.id = r.index_id
            WHERE ix.type = '10-K' AND r.id IS NULL AND
                  CAST(strftime('%Y', DATE(ix.date)) as INT) >= ? AND
                  CAST(strftime('%Y', DATE(ix.date)) as INT) <= ?
            ORDER BY ix.date DESC
            """, (int(start), int(end)))
        for batch in iter(lambda: cmd.fetchmany(batch_size), []):
            to_insert = list()
            for r in batch:
                log_row = r
                # r[5] is the index-page URL; the actual filing document is
                # linked from that page.
                response = requests.get(r[5])
                href = parse_href(response.content)
                url = fix_url(href, r[5])
                print(url)
                filetype = mimetypes.guess_type(url)[0]
                print(filetype)
                filename = os.path.basename(urlparse(url).path)
                print(filename)
                to_insert.append((r[0], r[1], r[2], r[3], r[4], url,
                                  filetype, filename))
                logwriter.writerow(log_row)
            db_insert(db, to_insert)
def parse_href(html_content):
    """Extract the filing-document link from an EDGAR index page.

    Raises:
        Exception: if the expected table cell contains no anchor.
    """
    doc = to_doc(html_content)
    anchors = doc.xpath('(//div[@id="formDiv"]//table//tr[2]/td[3]/a)')
    if not anchors:
        raise Exception("Unable to parse URL from index page")
    return anchors[0].get('href')
def fix_url(href, base_url):
    """Normalize an extracted href into an absolute document URL.

    Interactive iXBRL viewer links such as
    ``https://www.sec.gov/ix?doc=/Archives/.../anf-20201031.htm`` are
    rewritten to point at the plain HTML document, and relative hrefs are
    resolved against ``base_url``.
    """
    plain_path = href.replace('ix?doc=/', '')
    return urljoin(base_url, plain_path)
def to_doc(content):
    """Parse *content* into an lxml document.

    Tries the strict XML/XHTML parser first and falls back to the lenient
    soupparser for malformed HTML.
    """
    try:
        return etree.fromstring(content)
    except Exception:
        # Fix: the original used a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit; only parsing failures should trigger
        # the soupparser fallback.
        return fromstring(content)
def db_connect():
    """Open (creating on first use) the local SQLite database file."""
    return sqlite3.connect('edgar_htm_idx.sqlite3')
def db_insert(db: Connection, records):
    """Bulk-insert report rows; each record is an 8-tuple matching the
    (index_id, conm, type, cik, date, url, filetype, filename) columns."""
    cursor = db.cursor()
    cursor.executemany(
        "INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
        records,
    )
    db.commit()
def db_update(db: Connection, records):
    """Write analysis results back; each record's last element is the row id,
    the preceding seven values fill the SET placeholders in order."""
    sql = """
    UPDATE reports SET
    is_analyzed = ?,
    has_positive_sentiment = ?,
    word_count = ?,
    pos_count = ?,
    neg_count = ?,
    pos_words = ?,
    neg_words = ?
    where id = ?"""
    db.cursor().executemany(sql, records)
    db.commit()
def db_ensure_init(db: Connection):
    """Create the ``reports`` table if it does not exist yet (idempotent)."""
    cur = db.cursor()
    # TODO: FIXME add any new columns you want to store in the database
    # Fix: the original declared ``PRIMARY KEY("id" AUTOINCREMENT)`` followed
    # by FOREIGN KEY with no separating comma — accepted only because SQLite's
    # grammar tolerates a missing comma between table constraints. Use the
    # standard inline primary-key form instead.
    cur.execute("""CREATE TABLE IF NOT EXISTS "reports" (
        "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        "index_id" INTEGER UNIQUE,
        "conm" TEXT,
        "type" TEXT,
        "cik" TEXT,
        "date" TEXT,
        "url" TEXT,
        "filetype" TEXT,
        "filename" TEXT,
        "is_analyzed" INTEGER DEFAULT 0,
        "has_positive_sentiment" INTEGER,
        "word_count" INTEGER,
        "pos_count" INTEGER,
        "neg_count" INTEGER,
        "pos_words" TEXT,
        "neg_words" TEXT,
        FOREIGN KEY (index_id) REFERENCES "index"(id)
    );""")
@click.group()
def cli():
    # Root Click command group; subcommands are registered below via add_command.
    # (Comment, not a docstring, so the CLI help text is unchanged.)
    pass
# Attach the two subcommands to the root group
cli.add_command(fetch_report_urls)
cli.add_command(analyze)

# Script entry point: Click dispatches to whichever subcommand was invoked
if __name__ == '__main__':
    cli()
|
normal
|
{
"blob_id": "88e4e6647d4720d1c99f3e3438100790903921b5",
"index": 9163,
"step-1": "<mask token>\n\n\n@click.command()\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\n<mask token>\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\n<mask token>\n\n\n@click.group()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@click.command()\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\n@click.command()\n@click.argument('start', nargs=1)\n@click.argument('end', nargs=1)\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, 
to_insert)\n\n\n<mask token>\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\n<mask token>\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\n<mask token>\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\n<mask token>\n\n\n@click.group()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@click.command()\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\n@click.command()\n@click.argument('start', nargs=1)\n@click.argument('end', nargs=1)\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, 
to_insert)\n\n\ndef parse_href(html_content):\n root = to_doc(html_content)\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n if len(elements) == 0:\n raise Exception('Unable to parse URL from index page')\n href = elements[0].get('href')\n return href\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\ndef to_doc(content):\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n return doc\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\n 'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'\n , records)\n db.commit()\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\n<mask token>\n\n\n@click.group()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\n@click.command()\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n cmd = db.execute('SELECT id, url FROM reports WHERE is_analyzed = 0')\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print('Analyzing: ' + r[1])\n response = requests.get(r[1])\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n result = tone_count_with_negation_check(lmdict, text)\n has_positive_sentiment = result[1] > result[2]\n to_update.append((True, has_positive_sentiment, result[0],\n result[1], result[2], ' '.join(result[3]), ' '.join(result[\n 4]), r[0]))\n db_update(db, to_update)\n\n\n@click.command()\n@click.argument('start', nargs=1)\n@click.argument('end', nargs=1)\n@click.option('-s', '--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n cmd = db.execute(\n \"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\"\n .format(start=start, end=end))\n for batch in iter(lambda : cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n log_row = r\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url,\n filetype, filename))\n logwriter.writerow(log_row)\n db_insert(db, 
to_insert)\n\n\ndef parse_href(html_content):\n root = to_doc(html_content)\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n if len(elements) == 0:\n raise Exception('Unable to parse URL from index page')\n href = elements[0].get('href')\n return href\n\n\ndef fix_url(href, base_url):\n path = href.replace('ix?doc=/', '')\n url = urljoin(base_url, path)\n return url\n\n\ndef to_doc(content):\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n return doc\n\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\n 'INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)'\n , records)\n db.commit()\n\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\n \"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\"\n , records)\n db.commit()\n\n\ndef db_ensure_init(db: Connection):\n cur = db.cursor()\n cur.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS \"reports\" (\n \"id\"\tINTEGER NOT NULL,\n \"index_id\" INTEGER UNIQUE,\n \"conm\" TEXT,\n \"type\" TEXT,\n \"cik\" TEXT,\n \"date\" TEXT,\n \"url\"\tTEXT,\n \"filetype\"\tTEXT,\n \"filename\"\tTEXT,\n \"is_analyzed\"\tINTEGER DEFAULT 0,\n \"has_positive_sentiment\" INTEGER,\n \"word_count\" INTEGER,\n \"pos_count\" INTEGER,\n \"neg_count\" INTEGER,\n \"pos_words\" TEXT,\n \"neg_words\" TEXT,\n PRIMARY KEY(\"id\" AUTOINCREMENT)\n FOREIGN KEY (index_id) REFERENCES \"index\"(id)\n );\"\"\"\n )\n\n\n@click.group()\ndef cli():\n pass\n\n\n<mask token>\n",
"step-5": "import os\nimport click\nimport csv\nimport sqlite3\nfrom sqlite3.dbapi2 import Connection\nimport requests\nimport mimetypes\nfrom urllib.parse import urljoin, urlparse\nfrom lxml.html.soupparser import fromstring\nfrom lxml import etree\nfrom lxml.etree import tostring\nfrom analysis import lmdict, tone_count_with_negation_check\nfrom parser import parse_text\n\n@click.command()\n@click.option('-s','--batch-size', 'batch_size', default=50)\ndef analyze(batch_size):\n db = db_connect()\n db_ensure_init(db)\n\n cmd = db.execute(\"SELECT id, url FROM reports WHERE is_analyzed = 0\")\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_update = list()\n for r in batch:\n print(\"Analyzing: \" + r[1])\n response = requests.get(r[1])\n\n text = parse_text(response.text)\n print(text[0:400] + '\\n[CLIPPED]')\n\n # perform text analysis\n result = tone_count_with_negation_check(lmdict, text)\n\n has_positive_sentiment = result[1] > result[2]\n\n # TODO: FIXME\n # Here you should pass in all the variables that you want to store in the database\n # Refer to \"db_update\" method in what order params should be passed\n to_update.append((\n True,\n has_positive_sentiment,\n result[0],\n result[1],\n result[2],\n \" \".join(result[3]),\n \" \".join(result[4]),\n r[0]))\n\n db_update(db, to_update)\n\n\n@click.command()\n@click.argument('start', nargs=1)\n@click.argument('end', nargs=1)\n@click.option('-s','--batch-size', 'batch_size', default=50)\ndef fetch_report_urls(start, end, batch_size):\n \"\"\"Fetches and stores the 10-K report URLs\"\"\"\n db = db_connect()\n db_ensure_init(db)\n\n with open('log.csv', 'w', newline='') as log:\n logwriter = csv.writer(log)\n\n cmd = db.execute(\"\"\"\n SELECT ix.id, ix.conm, ix.type, ix.cik, ix.date, ix.path\n FROM \"index\" ix\n LEFT JOIN reports r ON ix.id = r.index_id\n WHERE ix.type = '10-K' AND r.id IS NULL AND\n CAST(strftime('%Y', DATE(ix.date)) as INT) >= {start} AND\n CAST(strftime('%Y', DATE(ix.date)) 
as INT) <= {end}\n ORDER BY ix.date DESC\n \"\"\".format(start=start, end=end))\n\n for batch in iter(lambda: cmd.fetchmany(batch_size), []):\n to_insert = list()\n for r in batch:\n # print(r)\n log_row = r\n\n response = requests.get(r[5])\n href = parse_href(response.content)\n url = fix_url(href, r[5])\n print(url)\n\n filetype = mimetypes.guess_type(url)[0]\n print(filetype)\n\n filename = os.path.basename(urlparse(url).path)\n print(filename)\n\n to_insert.append((r[0], r[1], r[2], r[3], r[4], url, filetype, filename))\n\n logwriter.writerow(log_row)\n\n db_insert(db, to_insert)\n\ndef parse_href(html_content):\n # print(html_content)\n root = to_doc(html_content)\n # f = open(\"debug_idx.html\", \"wb\")\n # f.write(tostring(root, pretty_print=True))\n # f.close()\n elements = root.xpath('(//div[@id=\"formDiv\"]//table//tr[2]/td[3]/a)')\n\n if len(elements) == 0:\n raise Exception(\"Unable to parse URL from index page\")\n\n href = elements[0].get('href')\n return href\n\ndef fix_url(href, base_url):\n # if the url links to an interactive iXBRL adjust the URL to link to the normal html\n # eg. 
https://www.sec.gov/ix?doc=/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm\n # -> https://www.sec.gov/Archives/edgar/data/1018840/000101884020000094/anf-20201031.htm\n path = href.replace('ix?doc=/', '')\n # a relative url needs to be joined with the base url\n url = urljoin(base_url, path)\n return url\n\ndef to_doc(content):\n # Try to parse as XML/XHTML and fallback to soupparser\n try:\n doc = etree.fromstring(content)\n except:\n doc = fromstring(content)\n\n return doc\n\ndef db_connect():\n db = sqlite3.connect('edgar_htm_idx.sqlite3')\n return db\n\ndef db_insert(db: Connection, records):\n c = db.cursor()\n c.executemany(\"INSERT INTO reports(index_id, conm, type, cik, date, url, filetype, filename) VALUES (?, ?, ?, ?, ?, ?, ?, ?)\", records)\n db.commit()\n\ndef db_update(db: Connection, records):\n c = db.cursor()\n c.executemany(\"\"\"\n UPDATE reports SET\n is_analyzed = ?,\n has_positive_sentiment = ?,\n word_count = ?,\n pos_count = ?,\n neg_count = ?,\n pos_words = ?,\n neg_words = ?\n where id = ?\"\"\", records)\n db.commit()\n\ndef db_ensure_init(db: Connection):\n cur = db.cursor()\n # TODO: FIXME add any new columns you want to store in the database\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS \"reports\" (\n \"id\"\tINTEGER NOT NULL,\n \"index_id\" INTEGER UNIQUE,\n \"conm\" TEXT,\n \"type\" TEXT,\n \"cik\" TEXT,\n \"date\" TEXT,\n \"url\"\tTEXT,\n \"filetype\"\tTEXT,\n \"filename\"\tTEXT,\n \"is_analyzed\"\tINTEGER DEFAULT 0,\n \"has_positive_sentiment\" INTEGER,\n \"word_count\" INTEGER,\n \"pos_count\" INTEGER,\n \"neg_count\" INTEGER,\n \"pos_words\" TEXT,\n \"neg_words\" TEXT,\n PRIMARY KEY(\"id\" AUTOINCREMENT)\n FOREIGN KEY (index_id) REFERENCES \"index\"(id)\n );\"\"\")\n\n\n@click.group()\ndef cli():\n pass\n\ncli.add_command(fetch_report_urls)\ncli.add_command(analyze)\n\nif __name__ == '__main__':\n cli()\n",
"step-ids": [
3,
6,
9,
10,
13
]
}
|
[
3,
6,
9,
10,
13
] |
"""Functions for updating and performing bulk inference using an Keras MPNN model"""
from typing import List, Dict, Tuple
import numpy as np
import tensorflow as tf
from molgym.mpnn.data import convert_nx_to_dict
from molgym.mpnn.layers import custom_objects
from molgym.utils.conversions import convert_smiles_to_nx
# TODO (wardlt): Make this Keras message object usable elsewhere
class MPNNMessage:
    """Pickle-friendly wrapper carrying an MPNN model's config and weights."""

    def __init__(self, model: tf.keras.Model):
        """
        Args:
            model: Model to be serialized for transport
        """
        self.config = model.to_json()
        # np.array forces a real copy so no memoryview objects end up pickled
        self.weights = [np.array(w) for w in model.get_weights()]

    def get_model(self) -> tf.keras.Model:
        """Reconstruct the Keras model from the stored config and weights."""
        rebuilt = tf.keras.models.model_from_json(self.config, custom_objects=custom_objects)
        rebuilt.set_weights(self.weights)
        return rebuilt
def _merge_batch(mols: List[dict]) -> dict:
"""Merge a list of molecules into a single batch
Args:
mols: List of molecules in dictionary format
Returns:
Single batch of molecules
"""
# Convert arrays to array
# Stack the values from each array
batch = dict(
(k, np.concatenate([np.atleast_1d(m[k]) for m in mols], axis=0))
for k in mols[0].keys()
)
# Compute the mappings from bond index to graph index
batch_size = len(mols)
mol_id = np.arange(batch_size, dtype=np.int)
batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)
batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)
# Compute offsets for the connectivity matrix
offset_values = np.zeros(batch_size, dtype=np.int)
np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])
offsets = np.repeat(offset_values, batch['n_bond'], axis=0)
batch['connectivity'] += np.expand_dims(offsets, 1)
return batch
def evaluate_mpnn(model_msg: MPNNMessage, smiles: List[str],
                  atom_types: List[int], bond_types: List[str], batch_size: int = 128) -> np.ndarray:
    """Run inference on a list of molecules.

    Args:
        model_msg: Serialized version of the model
        smiles: List of molecules to evaluate
        atom_types: List of known atom types
        bond_types: List of known bond types
        batch_size: Number of molecules per inference batch
    Returns:
        Predicted value for each molecule
    """
    # Rebuild the model from its serialized form
    tf.keras.backend.clear_session()
    model = model_msg.get_model()

    # Convert every SMILES string to the MPNN dictionary format, then merge
    # the molecules into fixed-size batches
    # TODO (wardlt): Use multiprocessing. Could benefit from a persistent Pool to avoid loading in TF many times
    mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]
    batches = [
        _merge_batch(mols[i:i + batch_size])
        for i in range(0, len(mols), batch_size)
    ]

    # Feed each batch through the network and stack the per-batch predictions
    return np.vstack([model.predict_on_batch(b) for b in batches])
# TODO (wardlt): Move to the MPNN library?
class GraphLoader(tf.keras.utils.Sequence):
    """Keras-compatible data loader for training a graph problem"""

    def __init__(self, smiles: List[str], atom_types: List[int], bond_types: List[str],
                 outputs: List[float], batch_size: int, shuffle: bool = True, random_state: int = None):
        """
        Args:
            smiles: List of molecules
            atom_types: List of known atom types
            bond_types: List of known bond types
            outputs: List of molecular outputs
            batch_size: Number of molecules per generated batch
            shuffle: Whether to shuffle after each epoch
            random_state: Random state for the shuffling
        """
        super(GraphLoader, self).__init__()

        # Convert the molecules to MPNN-ready format and pair each with its target
        mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]
        self.entries = np.array(list(zip(mols, outputs)))

        # Other data
        self.batch_size = batch_size
        self.shuffle = shuffle

        # Give it a first shuffle, if needed
        self.rng = np.random.RandomState(random_state)
        if shuffle:
            self.rng.shuffle(self.entries)

    def on_epoch_end(self):
        # FIX: the docstring promises shuffling after each epoch, but entries
        # were only shuffled once in __init__. Keras calls this hook
        # automatically at the end of every epoch.
        if self.shuffle:
            self.rng.shuffle(self.entries)

    def __getitem__(self, item):
        # Get the desired chunk of entries
        start = item * self.batch_size
        chunk = self.entries[start:start + self.batch_size]

        # Get the molecules and outputs out
        mols, y = zip(*chunk)
        x = _merge_batch(mols)
        return x, np.array(y)

    def __len__(self):
        # Number of full batches, plus one for any trailing partial batch
        train_size = len(self.entries)
        n_batches = train_size // self.batch_size
        if train_size % self.batch_size != 0:
            n_batches += 1
        return n_batches
# TODO (wardlt): Evaluate whether the model stays in memory after training. If so, clear graph?
def update_mpnn(model_msg: MPNNMessage, database: Dict[str, float], num_epochs: int,
                atom_types: List[int], bond_types: List[str], batch_size: int = 512,
                validation_split: float = 0.1, random_state: int = 1, learning_rate: float = 1e-3)\
        -> Tuple[List, dict]:
    """Update a model with new training sets.

    Args:
        model_msg: Serialized version of the model
        database: Training dataset of molecule mapped to a property
        atom_types: List of known atom types
        bond_types: List of known bond types
        num_epochs: Number of epochs to run
        batch_size: Number of molecules per training batch
        validation_split: Fraction of molecules used for the training/validation split
        random_state: Seed to the random number generator. Ensures entries do not move between train
            and validation set as the database becomes larger
        learning_rate: Learning rate for the Adam optimizer
    Returns:
        model: Updated weights
        history: Training history
    """
    # Restore the serialized model and attach an optimizer
    tf.keras.backend.clear_session()
    model = model_msg.get_model()
    model.compile(tf.keras.optimizers.Adam(lr=learning_rate), 'mean_absolute_error')

    # Split the database into parallel molecule / property sequences
    smiles, y = zip(*database.items())

    # A fixed-seed RNG keeps the train/validation assignment stable as the
    # database grows, so validation entries never leak into the training set
    # TODO (wardlt): Replace with passing train and validation separately?
    splitter = np.random.RandomState(random_state)
    in_train = splitter.rand(len(smiles)) > validation_split

    # Build the data loaders over the two partitions
    smiles = np.array(smiles)
    y = np.array(y)
    train_loader = GraphLoader(smiles[in_train], atom_types, bond_types, y[in_train],
                               batch_size=batch_size)
    val_loader = GraphLoader(smiles[~in_train], atom_types, bond_types, y[~in_train],
                             batch_size=batch_size, shuffle=False)

    # Train, then hand back copied weights plus the per-epoch history
    # TODO (wardlt): Should we use callbacks to get only the "best model" based on the validation set?
    history = model.fit(train_loader, epochs=num_epochs, validation_data=val_loader, verbose=False)
    return [np.array(w) for w in model.get_weights()], history.history
|
normal
|
{
"blob_id": "95ab8fce573ef959946d50d9af6e893cb8798917",
"index": 6714,
"step-1": "<mask token>\n\n\nclass MPNNMessage:\n \"\"\"Package for sending an MPNN model over pickle\"\"\"\n\n def __init__(self, model: tf.keras.Model):\n \"\"\"\n Args:\n model: Model to be sent\n \"\"\"\n self.config = model.to_json()\n self.weights = [np.array(v) for v in model.get_weights()]\n\n def get_model(self) ->tf.keras.Model:\n model = tf.keras.models.model_from_json(self.config, custom_objects\n =custom_objects)\n model.set_weights(self.weights)\n return model\n\n\n<mask token>\n\n\nclass GraphLoader(tf.keras.utils.Sequence):\n \"\"\"Keras-compatible data loader for training a graph problem\"\"\"\n\n def __init__(self, smiles: List[str], atom_types: List[int], bond_types:\n List[str], outputs: List[float], batch_size: int, shuffle: bool=\n True, random_state: int=None):\n \"\"\"\n\n Args:\n smiles: List of molecules\n atom_types: List of known atom types\n bond_types: List of known bond types\n outputs: List of molecular outputs\n batch_size: Number of batches to use to train model\n shuffle: Whether to shuffle after each epoch\n random_state: Random state for the shuffling\n \"\"\"\n super(GraphLoader, self).__init__()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n bond_types) for s in smiles]\n self.entries = np.array(list(zip(mols, outputs)))\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.rng = np.random.RandomState(random_state)\n if shuffle:\n self.rng.shuffle(self.entries)\n\n def __getitem__(self, item):\n start = item * self.batch_size\n chunk = self.entries[start:start + self.batch_size]\n mols, y = zip(*chunk)\n x = _merge_batch(mols)\n return x, np.array(y)\n\n def __len__(self):\n train_size = len(self.entries)\n n_batches = train_size // self.batch_size\n if train_size % self.batch_size != 0:\n n_batches += 1\n return n_batches\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MPNNMessage:\n \"\"\"Package for sending an MPNN model over pickle\"\"\"\n\n def __init__(self, model: tf.keras.Model):\n \"\"\"\n Args:\n model: Model to be sent\n \"\"\"\n self.config = model.to_json()\n self.weights = [np.array(v) for v in model.get_weights()]\n\n def get_model(self) ->tf.keras.Model:\n model = tf.keras.models.model_from_json(self.config, custom_objects\n =custom_objects)\n model.set_weights(self.weights)\n return model\n\n\ndef _merge_batch(mols: List[dict]) ->dict:\n \"\"\"Merge a list of molecules into a single batch\n\n Args:\n mols: List of molecules in dictionary format\n Returns:\n Single batch of molecules\n \"\"\"\n batch = dict((k, np.concatenate([np.atleast_1d(m[k]) for m in mols],\n axis=0)) for k in mols[0].keys())\n batch_size = len(mols)\n mol_id = np.arange(batch_size, dtype=np.int)\n batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)\n batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)\n offset_values = np.zeros(batch_size, dtype=np.int)\n np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])\n offsets = np.repeat(offset_values, batch['n_bond'], axis=0)\n batch['connectivity'] += np.expand_dims(offsets, 1)\n return batch\n\n\n<mask token>\n\n\nclass GraphLoader(tf.keras.utils.Sequence):\n \"\"\"Keras-compatible data loader for training a graph problem\"\"\"\n\n def __init__(self, smiles: List[str], atom_types: List[int], bond_types:\n List[str], outputs: List[float], batch_size: int, shuffle: bool=\n True, random_state: int=None):\n \"\"\"\n\n Args:\n smiles: List of molecules\n atom_types: List of known atom types\n bond_types: List of known bond types\n outputs: List of molecular outputs\n batch_size: Number of batches to use to train model\n shuffle: Whether to shuffle after each epoch\n random_state: Random state for the shuffling\n \"\"\"\n super(GraphLoader, self).__init__()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n 
bond_types) for s in smiles]\n self.entries = np.array(list(zip(mols, outputs)))\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.rng = np.random.RandomState(random_state)\n if shuffle:\n self.rng.shuffle(self.entries)\n\n def __getitem__(self, item):\n start = item * self.batch_size\n chunk = self.entries[start:start + self.batch_size]\n mols, y = zip(*chunk)\n x = _merge_batch(mols)\n return x, np.array(y)\n\n def __len__(self):\n train_size = len(self.entries)\n n_batches = train_size // self.batch_size\n if train_size % self.batch_size != 0:\n n_batches += 1\n return n_batches\n\n\ndef update_mpnn(model_msg: MPNNMessage, database: Dict[str, float],\n num_epochs: int, atom_types: List[int], bond_types: List[str],\n batch_size: int=512, validation_split: float=0.1, random_state: int=1,\n learning_rate: float=0.001) ->Tuple[List, dict]:\n \"\"\"Update a model with new training sets\n\n Args:\n model_msg: Serialized version of the model\n database: Training dataset of molecule mapped to a property\n atom_types: List of known atom types\n bond_types: List of known bond types\n num_epochs: Number of epochs to run\n batch_size: Number of molecules per training batch\n validation_split: Fraction of molecules used for the training/validation split\n random_state: Seed to the random number generator. 
Ensures entries do not move between train\n and validation set as the database becomes larger\n learning_rate: Learning rate for the Adam optimizer\n Returns:\n model: Updated weights\n history: Training history\n \"\"\"\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n model.compile(tf.keras.optimizers.Adam(lr=learning_rate),\n 'mean_absolute_error')\n smiles, y = zip(*database.items())\n rng = np.random.RandomState(random_state)\n train_split = rng.rand(len(smiles)) > validation_split\n smiles = np.array(smiles)\n y = np.array(y)\n train_loader = GraphLoader(smiles[train_split], atom_types, bond_types,\n y[train_split], batch_size=batch_size)\n val_loader = GraphLoader(smiles[~train_split], atom_types, bond_types,\n y[~train_split], batch_size=batch_size, shuffle=False)\n history = model.fit(train_loader, epochs=num_epochs, validation_data=\n val_loader, verbose=False)\n return [np.array(v) for v in model.get_weights()], history.history\n",
"step-3": "<mask token>\n\n\nclass MPNNMessage:\n \"\"\"Package for sending an MPNN model over pickle\"\"\"\n\n def __init__(self, model: tf.keras.Model):\n \"\"\"\n Args:\n model: Model to be sent\n \"\"\"\n self.config = model.to_json()\n self.weights = [np.array(v) for v in model.get_weights()]\n\n def get_model(self) ->tf.keras.Model:\n model = tf.keras.models.model_from_json(self.config, custom_objects\n =custom_objects)\n model.set_weights(self.weights)\n return model\n\n\ndef _merge_batch(mols: List[dict]) ->dict:\n \"\"\"Merge a list of molecules into a single batch\n\n Args:\n mols: List of molecules in dictionary format\n Returns:\n Single batch of molecules\n \"\"\"\n batch = dict((k, np.concatenate([np.atleast_1d(m[k]) for m in mols],\n axis=0)) for k in mols[0].keys())\n batch_size = len(mols)\n mol_id = np.arange(batch_size, dtype=np.int)\n batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)\n batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)\n offset_values = np.zeros(batch_size, dtype=np.int)\n np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])\n offsets = np.repeat(offset_values, batch['n_bond'], axis=0)\n batch['connectivity'] += np.expand_dims(offsets, 1)\n return batch\n\n\ndef evaluate_mpnn(model_msg: MPNNMessage, smiles: List[str], atom_types:\n List[int], bond_types: List[str], batch_size: int=128) ->np.ndarray:\n \"\"\"Run inference on a list of molecules\n\n Args:\n model_msg: Serialized version of the model\n smiles: List of molecules to evaluate\n atom_types: List of known atom types\n bond_types: List of known bond types\n batch_size: List of molecules to create into matches\n Returns:\n Predicted value for each molecule\n \"\"\"\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n bond_types) for s in smiles]\n chunks = [mols[start:start + batch_size] for start in range(0, len(mols\n ), batch_size)]\n batches 
= [_merge_batch(c) for c in chunks]\n outputs = [model.predict_on_batch(b) for b in batches]\n return np.vstack(outputs)\n\n\nclass GraphLoader(tf.keras.utils.Sequence):\n \"\"\"Keras-compatible data loader for training a graph problem\"\"\"\n\n def __init__(self, smiles: List[str], atom_types: List[int], bond_types:\n List[str], outputs: List[float], batch_size: int, shuffle: bool=\n True, random_state: int=None):\n \"\"\"\n\n Args:\n smiles: List of molecules\n atom_types: List of known atom types\n bond_types: List of known bond types\n outputs: List of molecular outputs\n batch_size: Number of batches to use to train model\n shuffle: Whether to shuffle after each epoch\n random_state: Random state for the shuffling\n \"\"\"\n super(GraphLoader, self).__init__()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n bond_types) for s in smiles]\n self.entries = np.array(list(zip(mols, outputs)))\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.rng = np.random.RandomState(random_state)\n if shuffle:\n self.rng.shuffle(self.entries)\n\n def __getitem__(self, item):\n start = item * self.batch_size\n chunk = self.entries[start:start + self.batch_size]\n mols, y = zip(*chunk)\n x = _merge_batch(mols)\n return x, np.array(y)\n\n def __len__(self):\n train_size = len(self.entries)\n n_batches = train_size // self.batch_size\n if train_size % self.batch_size != 0:\n n_batches += 1\n return n_batches\n\n\ndef update_mpnn(model_msg: MPNNMessage, database: Dict[str, float],\n num_epochs: int, atom_types: List[int], bond_types: List[str],\n batch_size: int=512, validation_split: float=0.1, random_state: int=1,\n learning_rate: float=0.001) ->Tuple[List, dict]:\n \"\"\"Update a model with new training sets\n\n Args:\n model_msg: Serialized version of the model\n database: Training dataset of molecule mapped to a property\n atom_types: List of known atom types\n bond_types: List of known bond types\n num_epochs: Number of epochs to run\n 
batch_size: Number of molecules per training batch\n validation_split: Fraction of molecules used for the training/validation split\n random_state: Seed to the random number generator. Ensures entries do not move between train\n and validation set as the database becomes larger\n learning_rate: Learning rate for the Adam optimizer\n Returns:\n model: Updated weights\n history: Training history\n \"\"\"\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n model.compile(tf.keras.optimizers.Adam(lr=learning_rate),\n 'mean_absolute_error')\n smiles, y = zip(*database.items())\n rng = np.random.RandomState(random_state)\n train_split = rng.rand(len(smiles)) > validation_split\n smiles = np.array(smiles)\n y = np.array(y)\n train_loader = GraphLoader(smiles[train_split], atom_types, bond_types,\n y[train_split], batch_size=batch_size)\n val_loader = GraphLoader(smiles[~train_split], atom_types, bond_types,\n y[~train_split], batch_size=batch_size, shuffle=False)\n history = model.fit(train_loader, epochs=num_epochs, validation_data=\n val_loader, verbose=False)\n return [np.array(v) for v in model.get_weights()], history.history\n",
"step-4": "<mask token>\nfrom typing import List, Dict, Tuple\nimport numpy as np\nimport tensorflow as tf\nfrom molgym.mpnn.data import convert_nx_to_dict\nfrom molgym.mpnn.layers import custom_objects\nfrom molgym.utils.conversions import convert_smiles_to_nx\n\n\nclass MPNNMessage:\n \"\"\"Package for sending an MPNN model over pickle\"\"\"\n\n def __init__(self, model: tf.keras.Model):\n \"\"\"\n Args:\n model: Model to be sent\n \"\"\"\n self.config = model.to_json()\n self.weights = [np.array(v) for v in model.get_weights()]\n\n def get_model(self) ->tf.keras.Model:\n model = tf.keras.models.model_from_json(self.config, custom_objects\n =custom_objects)\n model.set_weights(self.weights)\n return model\n\n\ndef _merge_batch(mols: List[dict]) ->dict:\n \"\"\"Merge a list of molecules into a single batch\n\n Args:\n mols: List of molecules in dictionary format\n Returns:\n Single batch of molecules\n \"\"\"\n batch = dict((k, np.concatenate([np.atleast_1d(m[k]) for m in mols],\n axis=0)) for k in mols[0].keys())\n batch_size = len(mols)\n mol_id = np.arange(batch_size, dtype=np.int)\n batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)\n batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)\n offset_values = np.zeros(batch_size, dtype=np.int)\n np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])\n offsets = np.repeat(offset_values, batch['n_bond'], axis=0)\n batch['connectivity'] += np.expand_dims(offsets, 1)\n return batch\n\n\ndef evaluate_mpnn(model_msg: MPNNMessage, smiles: List[str], atom_types:\n List[int], bond_types: List[str], batch_size: int=128) ->np.ndarray:\n \"\"\"Run inference on a list of molecules\n\n Args:\n model_msg: Serialized version of the model\n smiles: List of molecules to evaluate\n atom_types: List of known atom types\n bond_types: List of known bond types\n batch_size: List of molecules to create into matches\n Returns:\n Predicted value for each molecule\n \"\"\"\n 
tf.keras.backend.clear_session()\n model = model_msg.get_model()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n bond_types) for s in smiles]\n chunks = [mols[start:start + batch_size] for start in range(0, len(mols\n ), batch_size)]\n batches = [_merge_batch(c) for c in chunks]\n outputs = [model.predict_on_batch(b) for b in batches]\n return np.vstack(outputs)\n\n\nclass GraphLoader(tf.keras.utils.Sequence):\n \"\"\"Keras-compatible data loader for training a graph problem\"\"\"\n\n def __init__(self, smiles: List[str], atom_types: List[int], bond_types:\n List[str], outputs: List[float], batch_size: int, shuffle: bool=\n True, random_state: int=None):\n \"\"\"\n\n Args:\n smiles: List of molecules\n atom_types: List of known atom types\n bond_types: List of known bond types\n outputs: List of molecular outputs\n batch_size: Number of batches to use to train model\n shuffle: Whether to shuffle after each epoch\n random_state: Random state for the shuffling\n \"\"\"\n super(GraphLoader, self).__init__()\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types,\n bond_types) for s in smiles]\n self.entries = np.array(list(zip(mols, outputs)))\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.rng = np.random.RandomState(random_state)\n if shuffle:\n self.rng.shuffle(self.entries)\n\n def __getitem__(self, item):\n start = item * self.batch_size\n chunk = self.entries[start:start + self.batch_size]\n mols, y = zip(*chunk)\n x = _merge_batch(mols)\n return x, np.array(y)\n\n def __len__(self):\n train_size = len(self.entries)\n n_batches = train_size // self.batch_size\n if train_size % self.batch_size != 0:\n n_batches += 1\n return n_batches\n\n\ndef update_mpnn(model_msg: MPNNMessage, database: Dict[str, float],\n num_epochs: int, atom_types: List[int], bond_types: List[str],\n batch_size: int=512, validation_split: float=0.1, random_state: int=1,\n learning_rate: float=0.001) ->Tuple[List, dict]:\n \"\"\"Update a model with 
new training sets\n\n Args:\n model_msg: Serialized version of the model\n database: Training dataset of molecule mapped to a property\n atom_types: List of known atom types\n bond_types: List of known bond types\n num_epochs: Number of epochs to run\n batch_size: Number of molecules per training batch\n validation_split: Fraction of molecules used for the training/validation split\n random_state: Seed to the random number generator. Ensures entries do not move between train\n and validation set as the database becomes larger\n learning_rate: Learning rate for the Adam optimizer\n Returns:\n model: Updated weights\n history: Training history\n \"\"\"\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n model.compile(tf.keras.optimizers.Adam(lr=learning_rate),\n 'mean_absolute_error')\n smiles, y = zip(*database.items())\n rng = np.random.RandomState(random_state)\n train_split = rng.rand(len(smiles)) > validation_split\n smiles = np.array(smiles)\n y = np.array(y)\n train_loader = GraphLoader(smiles[train_split], atom_types, bond_types,\n y[train_split], batch_size=batch_size)\n val_loader = GraphLoader(smiles[~train_split], atom_types, bond_types,\n y[~train_split], batch_size=batch_size, shuffle=False)\n history = model.fit(train_loader, epochs=num_epochs, validation_data=\n val_loader, verbose=False)\n return [np.array(v) for v in model.get_weights()], history.history\n",
"step-5": "\"\"\"Functions for updating and performing bulk inference using an Keras MPNN model\"\"\"\nfrom typing import List, Dict, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nfrom molgym.mpnn.data import convert_nx_to_dict\nfrom molgym.mpnn.layers import custom_objects\nfrom molgym.utils.conversions import convert_smiles_to_nx\n\n\n# TODO (wardlt): Make this Keras message object usable elsewhere\nclass MPNNMessage:\n \"\"\"Package for sending an MPNN model over pickle\"\"\"\n\n def __init__(self, model: tf.keras.Model):\n \"\"\"\n Args:\n model: Model to be sent\n \"\"\"\n\n self.config = model.to_json()\n # Makes a copy of the weights to ensure they are not memoryview objects\n self.weights = [np.array(v) for v in model.get_weights()]\n\n def get_model(self) -> tf.keras.Model:\n model = tf.keras.models.model_from_json(self.config, custom_objects=custom_objects)\n model.set_weights(self.weights)\n return model\n\n\ndef _merge_batch(mols: List[dict]) -> dict:\n \"\"\"Merge a list of molecules into a single batch\n\n Args:\n mols: List of molecules in dictionary format\n Returns:\n Single batch of molecules\n \"\"\"\n\n # Convert arrays to array\n\n # Stack the values from each array\n batch = dict(\n (k, np.concatenate([np.atleast_1d(m[k]) for m in mols], axis=0))\n for k in mols[0].keys()\n )\n\n # Compute the mappings from bond index to graph index\n batch_size = len(mols)\n mol_id = np.arange(batch_size, dtype=np.int)\n batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)\n batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)\n\n # Compute offsets for the connectivity matrix\n offset_values = np.zeros(batch_size, dtype=np.int)\n np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])\n offsets = np.repeat(offset_values, batch['n_bond'], axis=0)\n batch['connectivity'] += np.expand_dims(offsets, 1)\n\n return batch\n\n\ndef evaluate_mpnn(model_msg: MPNNMessage, smiles: List[str],\n atom_types: List[int], 
bond_types: List[str], batch_size: int = 128) -> np.ndarray:\n \"\"\"Run inference on a list of molecules\n\n Args:\n model_msg: Serialized version of the model\n smiles: List of molecules to evaluate\n atom_types: List of known atom types\n bond_types: List of known bond types\n batch_size: List of molecules to create into matches\n Returns:\n Predicted value for each molecule\n \"\"\"\n\n # Rebuild the model\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n\n # Convert all SMILES strings to batches of molecules\n # TODO (wardlt): Use multiprocessing. Could benefit from a persistent Pool to avoid loading in TF many times\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]\n chunks = [mols[start:start + batch_size] for start in range(0, len(mols), batch_size)]\n batches = [_merge_batch(c) for c in chunks]\n\n # Feed the batches through the MPNN\n outputs = [model.predict_on_batch(b) for b in batches]\n return np.vstack(outputs)\n\n\n# TODO (wardlt): Move to the MPNN library?\nclass GraphLoader(tf.keras.utils.Sequence):\n \"\"\"Keras-compatible data loader for training a graph problem\"\"\"\n\n def __init__(self, smiles: List[str], atom_types: List[int], bond_types: List[str],\n outputs: List[float], batch_size: int, shuffle: bool = True, random_state: int = None):\n \"\"\"\n\n Args:\n smiles: List of molecules\n atom_types: List of known atom types\n bond_types: List of known bond types\n outputs: List of molecular outputs\n batch_size: Number of batches to use to train model\n shuffle: Whether to shuffle after each epoch\n random_state: Random state for the shuffling\n \"\"\"\n\n super(GraphLoader, self).__init__()\n\n # Convert the molecules to MPNN-ready formats\n mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]\n self.entries = np.array(list(zip(mols, outputs)))\n\n # Other data\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n # Give it a first 
shuffle, if needed\n self.rng = np.random.RandomState(random_state)\n if shuffle:\n self.rng.shuffle(self.entries)\n\n def __getitem__(self, item):\n # Get the desired chunk of entries\n start = item * self.batch_size\n chunk = self.entries[start:start + self.batch_size]\n\n # Get the molecules and outputs out\n mols, y = zip(*chunk)\n x = _merge_batch(mols)\n return x, np.array(y)\n\n def __len__(self):\n # Get the number of batches\n train_size = len(self.entries)\n n_batches = train_size // self.batch_size\n\n # Add a partially-full batch at the end\n if train_size % self.batch_size != 0:\n n_batches += 1\n return n_batches\n\n\n# TODO (wardlt): Evaluate whether the model stays in memory after training. If so, clear graph?\ndef update_mpnn(model_msg: MPNNMessage, database: Dict[str, float], num_epochs: int,\n atom_types: List[int], bond_types: List[str], batch_size: int = 512,\n validation_split: float = 0.1, random_state: int = 1, learning_rate: float = 1e-3)\\\n -> Tuple[List, dict]:\n \"\"\"Update a model with new training sets\n\n Args:\n model_msg: Serialized version of the model\n database: Training dataset of molecule mapped to a property\n atom_types: List of known atom types\n bond_types: List of known bond types\n num_epochs: Number of epochs to run\n batch_size: Number of molecules per training batch\n validation_split: Fraction of molecules used for the training/validation split\n random_state: Seed to the random number generator. 
Ensures entries do not move between train\n and validation set as the database becomes larger\n learning_rate: Learning rate for the Adam optimizer\n Returns:\n model: Updated weights\n history: Training history\n \"\"\"\n\n # Rebuild the model\n tf.keras.backend.clear_session()\n model = model_msg.get_model()\n model.compile(tf.keras.optimizers.Adam(lr=learning_rate), 'mean_absolute_error')\n\n # Separate the database into molecules and properties\n smiles, y = zip(*database.items())\n\n # Make the training and validation splits\n # Use a random number generator with fixed seed to ensure that the validation\n # set is never polluted with entries from the training set\n # TODO (wardlt): Replace with passing train and validation separately?\n rng = np.random.RandomState(random_state)\n train_split = rng.rand(len(smiles)) > validation_split\n\n # Make the loaders\n smiles = np.array(smiles)\n y = np.array(y)\n train_loader = GraphLoader(smiles[train_split], atom_types, bond_types, y[train_split],\n batch_size=batch_size)\n val_loader = GraphLoader(smiles[~train_split], atom_types, bond_types, y[~train_split],\n batch_size=batch_size, shuffle=False)\n\n # Run the desired number of epochs\n # TODO (wardlt): Should we use callbacks to get only the \"best model\" based on the validation set?\n history = model.fit(train_loader, epochs=num_epochs, validation_data=val_loader, verbose=False)\n return [np.array(v) for v in model.get_weights()], history.history\n",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
#####################
# Aufgabe 2, 13.7 #
# v1.0 #
# baehll #
# 04.05.2018 #
#####################
class Pinnwand:
def __init__(self):
self.__zettel = []
def hefteAn(self, notiz):
#Analyse des Textes
prio = notiz.count("!")
self.__zettel.append((prio, notiz))
def entferne(self):
hoechste = 0
zettel = 0
for i in range(len(self.__zettel)):
if self.__zettel[i][0] > hoechste:
hoechste = self.__zettel[i][0]
zettel = i
print(self.__zettel[zettel][1])
del self.__zettel[zettel]
def __str__(self):
ausgabe = "Notizen\n"
zettelListe = self.__zettel[:]
zettelListe.sort(reverse=True)
print("Zettelliste: ")
print(zettelListe)
for z in zettelListe:
ausgabe += z[1] + "\t"
ausgabe += "(Priorität: " + str(z[0]) + ")" + "\n"
return ausgabe
menue = """
(N)eue Notiz anheften (A)lle Notizen auflisten
(W)ichtigste Notiz entfernen (E)nde
"""
p = Pinnwand()
while True:
print(menue)
eingabe = input("Ihre Wahl: ")
if eingabe in "nN":
notiz = input("Notiz: ")
while notiz != "":
p.hefteAn(notiz)
notiz = input("Notiz: ")
elif eingabe in "aA":
print(p)
elif eingabe in "wW":
p.entferne()
elif eingabe in "eE":
print("Tschüß!")
break
|
normal
|
{
"blob_id": "382a3b8bcd07c7098cecf2b770e46dfff50eeb98",
"index": 2695,
"step-1": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n <mask token>\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\n",
"step-2": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\n",
"step-3": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\n<mask token>\nwhile True:\n print(menue)\n eingabe = input('Ihre Wahl: ')\n if eingabe in 'nN':\n notiz = input('Notiz: ')\n while notiz != '':\n p.hefteAn(notiz)\n notiz = input('Notiz: ')\n elif eingabe in 'aA':\n print(p)\n elif eingabe in 'wW':\n p.entferne()\n elif eingabe in 'eE':\n print('Tschüß!')\n break\n",
"step-4": "class Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n\n def hefteAn(self, notiz):\n prio = notiz.count('!')\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n\n def __str__(self):\n ausgabe = 'Notizen\\n'\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print('Zettelliste: ')\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + '\\t'\n ausgabe += '(Priorität: ' + str(z[0]) + ')' + '\\n'\n return ausgabe\n\n\nmenue = \"\"\"\n (N)eue Notiz anheften (A)lle Notizen auflisten\n (W)ichtigste Notiz entfernen (E)nde\n\"\"\"\np = Pinnwand()\nwhile True:\n print(menue)\n eingabe = input('Ihre Wahl: ')\n if eingabe in 'nN':\n notiz = input('Notiz: ')\n while notiz != '':\n p.hefteAn(notiz)\n notiz = input('Notiz: ')\n elif eingabe in 'aA':\n print(p)\n elif eingabe in 'wW':\n p.entferne()\n elif eingabe in 'eE':\n print('Tschüß!')\n break\n",
"step-5": "#####################\n# Aufgabe 2, 13.7 #\n# v1.0 #\n# baehll #\n# 04.05.2018 #\n#####################\n\nclass Pinnwand:\n\n def __init__(self):\n self.__zettel = []\n \n def hefteAn(self, notiz):\n #Analyse des Textes\n prio = notiz.count(\"!\")\n self.__zettel.append((prio, notiz))\n\n def entferne(self):\n hoechste = 0\n zettel = 0\n for i in range(len(self.__zettel)):\n if self.__zettel[i][0] > hoechste:\n hoechste = self.__zettel[i][0]\n zettel = i\n print(self.__zettel[zettel][1])\n del self.__zettel[zettel]\n \n def __str__(self):\n ausgabe = \"Notizen\\n\"\n zettelListe = self.__zettel[:]\n zettelListe.sort(reverse=True)\n print(\"Zettelliste: \")\n print(zettelListe)\n for z in zettelListe:\n ausgabe += z[1] + \"\\t\"\n ausgabe += \"(Priorität: \" + str(z[0]) + \")\" + \"\\n\"\n return ausgabe\n\nmenue = \"\"\"\n (N)eue Notiz anheften (A)lle Notizen auflisten\n (W)ichtigste Notiz entfernen (E)nde\n\"\"\"\n\np = Pinnwand()\n\nwhile True:\n print(menue)\n eingabe = input(\"Ihre Wahl: \")\n if eingabe in \"nN\":\n notiz = input(\"Notiz: \")\n while notiz != \"\":\n p.hefteAn(notiz)\n notiz = input(\"Notiz: \")\n elif eingabe in \"aA\":\n print(p)\n elif eingabe in \"wW\":\n p.entferne()\n elif eingabe in \"eE\":\n print(\"Tschüß!\")\n break",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""
"""
import cPickle as pickle
def convert_cpu_stats_to_num_array(cpuStats):
"""
Given a list of statistics (tuples[timestamp, total_cpu, kernel_cpu, vm, rss])
Return five numarrays
"""
print "Converting cpus stats into numpy array"
c0 = []
c1 = []
c2 = []
c3 = []
c4 = []
# TODO - need a pythonic/numpy way for corner turning
gc.disable()
for c in cpuStats:
c0.append(c[0])
c1.append(c[1])
c2.append(c[2])
c3.append(c[3])
c4.append(c[4])
gc.enable()
return (np.array(c0), np.array(c1), np.array(c2), np.array(c3), np.array(c4))
def plot_cpu_mem_usage_from_file(cpufile, figfile, stt=None, x_max=None, time_label=None):
"""
Plot CPU and memory usage from a cpu log file
parameters:
cpufile: the full path of the cpu log file (string)
figfile: the full path of the plot file (string)
stt: start time stamp in seconds (Integer,
None if let it done automatically)
x_max: the duration of the time axis in seconds (Integer,
None automatically set)
time_label: full path to the application activity log (string)
each line is something like this:
2014-08-17 04:44:24 major cycle 3
2014-08-17 04:45:44 make image
If set, the plot tries to draw vertical lines along the
time axis to show these activities This is an experimental
feature, need more work
"""
reList = []
if os.path.exists(cpufile):
try:
pkl_file = open(cpufile, 'rb')
print 'Loading CPU stats object from file %s' % cpufile
cpuStatsList = pickle.load(pkl_file)
pkl_file.close()
if cpuStatsList == None:
raise Exception("The CPU stats object is None when reading from the file")
reList += cpuStatsList
#return cpuStatsList
except Exception, e:
ex = str(e)
import traceback
print 'Fail to load the CPU stats from file %s: %s' % (cpufile, ex)
traceback.print_exc()
raise e
else:
print 'Cannot locate the CPU stats file %s' % cpufile
fig = pl.figure()
plot_cpu_mem_usage(fig, x_max, reList, stt, standalone = True, time_label = time_label)
#fig.savefig('/tmp/cpu_mem_usage.pdf')
fig.savefig(figfile)
pl.close(fig)
def plot_cpu_mem_usage(fig, cpuStats, x_max = None, stt = None,
standalone = False, time_label = None):
if standalone:
ax1 = fig.add_subplot(111)
else:
ax1 = fig.add_subplot(211)
ax1.set_xlabel('Time (seconds)', fontsize = 9)
ax1.set_ylabel('CPU usage (% of Wall Clock time)', fontsize = 9)
ax1.set_title('CPU and Memory usage', fontsize=10)
ax1.tick_params(axis='both', which='major', labelsize=8)
ax1.tick_params(axis='both', which='minor', labelsize=6)
# get the data in numpy array
ta, tc, kc, vm, rss = convert_cpu_stats_to_num_array(cpuStats)
if stt is None:
stt = ta
ta -= stt
st = int(ta[0])
ed = int(ta[-1])
if x_max is None:
x_max = ed
elif ed > x_max:
x_max = ed
# create x-axis (whole integer seconds) between st and ed
# x = np.r_[st:ed + 1]
x = ta.astype(np.int64)
# plot the total cpu
ax1.plot(x, tc, color = 'g', linestyle = '-', label = 'total cpu')
# plot the kernel cpu
ax1.plot(x, kc, color = 'r', linestyle = '--', label = 'kernel cpu')
# plot the virtual mem
ax2 = ax1.twinx()
ax2.set_ylabel('Memory usage (MB)', fontsize = 9)
ax2.tick_params(axis='y', which='major', labelsize=8)
ax2.tick_params(axis='y', which='minor', labelsize=6)
ax2.plot(x, vm / 1024.0 ** 2, color = 'b', linestyle = ':', label = 'virtual memory')
# plot the rss
ax2.plot(x, rss / 1024.0 ** 2, color = 'k', linestyle = '-.', label = 'resident memory')
mmm = max(tc)
ax1.set_ylim([0, 1.5 * mmm])
ax1.set_xlim([0, x_max]) # align the time axis to accommodate cpu/memory
# it should read a template and then populate the time
if time_label:
import datetime
with open(time_label) as f:
c = 0
for line in f:
fs = line.split('\t')
aa = fs[0].replace(' ', ',').replace('-',',').replace(':',',')
aaa = aa.split(',')
tstamp = (datetime.datetime(int(aaa[0]),int(aaa[1]),int(aaa[2]),int(aaa[3]),int(aaa[4]),int(aaa[5])) - datetime.datetime(1970,1,1)).total_seconds()
tstamp -= stt
if (c % 2 == 0):
delt = 0
co = 'k'
ls = 'dotted'
else:
delt = 50
co = 'm'
ls = 'dashed'
ax1.vlines(tstamp, 0, 1.5 * mmm, colors = co, linestyles=ls)
ax1.text(tstamp - 25, 1 * mmm + delt, fs[1], fontsize = 7)
c += 1
ax1.legend(loc='upper left', shadow=True, prop={'size':8})
ax2.legend(loc='upper right', shadow=True, prop={'size':8})
|
normal
|
{
"blob_id": "85f5f9370896eac17dc72bbbf8d2dd1d7adc3a5b",
"index": 7872,
"step-1": "\"\"\"\n\n\"\"\"\nimport cPickle as pickle\n\n\ndef convert_cpu_stats_to_num_array(cpuStats):\n \"\"\"\n Given a list of statistics (tuples[timestamp, total_cpu, kernel_cpu, vm, rss])\n Return five numarrays\n \"\"\"\n print \"Converting cpus stats into numpy array\"\n c0 = []\n c1 = []\n c2 = []\n c3 = []\n c4 = []\n # TODO - need a pythonic/numpy way for corner turning\n gc.disable()\n for c in cpuStats:\n c0.append(c[0])\n c1.append(c[1])\n c2.append(c[2])\n c3.append(c[3])\n c4.append(c[4])\n gc.enable()\n\n return (np.array(c0), np.array(c1), np.array(c2), np.array(c3), np.array(c4))\n\n\ndef plot_cpu_mem_usage_from_file(cpufile, figfile, stt=None, x_max=None, time_label=None):\n \"\"\"\n Plot CPU and memory usage from a cpu log file\n\n parameters:\n cpufile: the full path of the cpu log file (string)\n figfile: the full path of the plot file (string)\n stt: start time stamp in seconds (Integer,\n None if let it done automatically)\n x_max: the duration of the time axis in seconds (Integer,\n None automatically set)\n time_label: full path to the application activity log (string)\n each line is something like this:\n 2014-08-17 04:44:24 major cycle 3\n 2014-08-17 04:45:44 make image\n\n If set, the plot tries to draw vertical lines along the\n time axis to show these activities This is an experimental\n feature, need more work\n\n \"\"\"\n reList = []\n if os.path.exists(cpufile):\n try:\n pkl_file = open(cpufile, 'rb')\n print 'Loading CPU stats object from file %s' % cpufile\n cpuStatsList = pickle.load(pkl_file)\n pkl_file.close()\n if cpuStatsList == None:\n raise Exception(\"The CPU stats object is None when reading from the file\")\n reList += cpuStatsList\n #return cpuStatsList\n\n except Exception, e:\n ex = str(e)\n import traceback\n print 'Fail to load the CPU stats from file %s: %s' % (cpufile, ex)\n traceback.print_exc()\n raise e\n else:\n print 'Cannot locate the CPU stats file %s' % cpufile\n fig = pl.figure()\n 
plot_cpu_mem_usage(fig, x_max, reList, stt, standalone = True, time_label = time_label)\n #fig.savefig('/tmp/cpu_mem_usage.pdf')\n fig.savefig(figfile)\n pl.close(fig)\n\n\ndef plot_cpu_mem_usage(fig, cpuStats, x_max = None, stt = None,\n standalone = False, time_label = None):\n if standalone:\n ax1 = fig.add_subplot(111)\n else:\n ax1 = fig.add_subplot(211)\n ax1.set_xlabel('Time (seconds)', fontsize = 9)\n\n ax1.set_ylabel('CPU usage (% of Wall Clock time)', fontsize = 9)\n ax1.set_title('CPU and Memory usage', fontsize=10)\n ax1.tick_params(axis='both', which='major', labelsize=8)\n ax1.tick_params(axis='both', which='minor', labelsize=6)\n\n # get the data in numpy array\n ta, tc, kc, vm, rss = convert_cpu_stats_to_num_array(cpuStats)\n if stt is None:\n stt = ta\n ta -= stt\n st = int(ta[0])\n ed = int(ta[-1])\n if x_max is None:\n x_max = ed\n elif ed > x_max:\n x_max = ed\n\n # create x-axis (whole integer seconds) between st and ed\n # x = np.r_[st:ed + 1]\n x = ta.astype(np.int64)\n\n # plot the total cpu\n ax1.plot(x, tc, color = 'g', linestyle = '-', label = 'total cpu')\n\n # plot the kernel cpu\n ax1.plot(x, kc, color = 'r', linestyle = '--', label = 'kernel cpu')\n\n # plot the virtual mem\n\n ax2 = ax1.twinx()\n ax2.set_ylabel('Memory usage (MB)', fontsize = 9)\n ax2.tick_params(axis='y', which='major', labelsize=8)\n ax2.tick_params(axis='y', which='minor', labelsize=6)\n ax2.plot(x, vm / 1024.0 ** 2, color = 'b', linestyle = ':', label = 'virtual memory')\n\n # plot the rss\n ax2.plot(x, rss / 1024.0 ** 2, color = 'k', linestyle = '-.', label = 'resident memory')\n mmm = max(tc)\n ax1.set_ylim([0, 1.5 * mmm])\n\n ax1.set_xlim([0, x_max]) # align the time axis to accommodate cpu/memory\n\n # it should read a template and then populate the time\n if time_label:\n import datetime\n with open(time_label) as f:\n c = 0\n for line in f:\n fs = line.split('\\t')\n aa = fs[0].replace(' ', ',').replace('-',',').replace(':',',')\n aaa = aa.split(',')\n 
tstamp = (datetime.datetime(int(aaa[0]),int(aaa[1]),int(aaa[2]),int(aaa[3]),int(aaa[4]),int(aaa[5])) - datetime.datetime(1970,1,1)).total_seconds()\n tstamp -= stt\n if (c % 2 == 0):\n delt = 0\n co = 'k'\n ls = 'dotted'\n else:\n delt = 50\n co = 'm'\n ls = 'dashed'\n ax1.vlines(tstamp, 0, 1.5 * mmm, colors = co, linestyles=ls)\n ax1.text(tstamp - 25, 1 * mmm + delt, fs[1], fontsize = 7)\n c += 1\n ax1.legend(loc='upper left', shadow=True, prop={'size':8})\n ax2.legend(loc='upper right', shadow=True, prop={'size':8})\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def pantip(k, n, arr, path, len):
if len == 0:
if sum(path) == k:
path.reverse()
print(path)
return
path.append(arr[len - 1])
pantip(k, n, arr, path, len - 1)
path.pop()
pantip(k, n, arr, path, len - 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def pantip(k, n, arr, path, len):
if len == 0:
if sum(path) == k:
path.reverse()
print(path)
return
path.append(arr[len - 1])
pantip(k, n, arr, path, len - 1)
path.pop()
pantip(k, n, arr, path, len - 1)
<|reserved_special_token_0|>
print('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.
format(arr, inp[0], pattern))
<|reserved_special_token_1|>
def pantip(k, n, arr, path, len):
if len == 0:
if sum(path) == k:
path.reverse()
print(path)
return
path.append(arr[len - 1])
pantip(k, n, arr, path, len - 1)
path.pop()
pantip(k, n, arr, path, len - 1)
inp = input('Enter Input (Money, Product) : ').split('/')
arr = [int(i) for i in inp[1].split()]
len = len(arr)
pattern = pantip(int(inp[0]), 0, arr, [], len)
print('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.
format(arr, inp[0], pattern))
<|reserved_special_token_1|>
def pantip(k, n, arr, path,len):
if len == 0:
if sum(path)==k:
path.reverse()
print(path)
return
path.append(arr[len-1])
pantip(k,n,arr,path,len-1)
path.pop()
#backtrack
pantip(k,n,arr,path,len-1)
inp = input('Enter Input (Money, Product) : ').split('/')
arr = [int(i) for i in inp[1].split()]
len = len(arr)
pattern = pantip(int(inp[0]), 0, arr, [],len)
print("Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern".format(arr, inp[0], pattern))
|
flexible
|
{
"blob_id": "6cdaf89d97be8f5ef37ab35f2916a36b4c75ddbe",
"index": 7513,
"step-1": "<mask token>\n",
"step-2": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\n<mask token>\n",
"step-3": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\n<mask token>\nprint('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.\n format(arr, inp[0], pattern))\n",
"step-4": "def pantip(k, n, arr, path, len):\n if len == 0:\n if sum(path) == k:\n path.reverse()\n print(path)\n return\n path.append(arr[len - 1])\n pantip(k, n, arr, path, len - 1)\n path.pop()\n pantip(k, n, arr, path, len - 1)\n\n\ninp = input('Enter Input (Money, Product) : ').split('/')\narr = [int(i) for i in inp[1].split()]\nlen = len(arr)\npattern = pantip(int(inp[0]), 0, arr, [], len)\nprint('Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern'.\n format(arr, inp[0], pattern))\n",
"step-5": "def pantip(k, n, arr, path,len):\r\n if len == 0:\r\n if sum(path)==k:\r\n path.reverse()\r\n print(path)\r\n return \r\n path.append(arr[len-1])\r\n pantip(k,n,arr,path,len-1)\r\n path.pop()\r\n #backtrack\r\n pantip(k,n,arr,path,len-1)\r\ninp = input('Enter Input (Money, Product) : ').split('/')\r\narr = [int(i) for i in inp[1].split()]\r\nlen = len(arr)\r\npattern = pantip(int(inp[0]), 0, arr, [],len)\r\nprint(\"Krisada can purchase Product: {0} with: {1} Baht | {2} Pattern\".format(arr, inp[0], pattern))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CommentForm(ModelForm):
class Meta:
model = Comment
<|reserved_special_token_1|>
from django.forms import ModelForm
from django import forms
from models import *
from django.forms.widgets import *
class CommentForm(ModelForm):
class Meta:
model = Comment
<|reserved_special_token_1|>
from django.forms import ModelForm
from django import forms
from models import *
from django.forms.widgets import *
class CommentForm(ModelForm):
# tags = TagField(widget=TagAutocomplete())
class Meta:
model=Comment
# fields = ('title', 'description', 'tags', 'enable_comments', 'owner')#, 'first_card' )
# widgets = {
# 'slug': HiddenInput,
# 'number_of_cards': HiddenInput,
# }
|
flexible
|
{
"blob_id": "81535b43437f9bcb18973ceaa5c3340ad9bd4f0f",
"index": 4170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CommentForm(ModelForm):\n\n\n class Meta:\n model = Comment\n",
"step-3": "from django.forms import ModelForm\nfrom django import forms\nfrom models import *\nfrom django.forms.widgets import *\n\n\nclass CommentForm(ModelForm):\n\n\n class Meta:\n model = Comment\n",
"step-4": "from django.forms import ModelForm\nfrom django import forms\nfrom models import *\nfrom django.forms.widgets import *\n\nclass CommentForm(ModelForm):\n\t# tags = TagField(widget=TagAutocomplete())\n\tclass Meta:\n\t\tmodel=Comment\n\t\t# fields = ('title', 'description', 'tags', 'enable_comments', 'owner')#, 'first_card' )\n\t\t\n\t\t# widgets = {\n\t\t# \t'slug': HiddenInput,\n\t\t# \t'number_of_cards': HiddenInput,\n\t\t# \t}\n\t\t",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
""" Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
    """Fetch the feature matrix of one block.

    Features either live in RAM (`feats` is a list of arrays) or on disk
    (`feats` is a list of .npy paths).  When `bias_value` is positive a
    constant bias column is appended to the features.
    """
    if memory_mode not in ('RAM', 'disk'):
        raise ValueError(
            'Memory mode {} is not supported.'.format(memory_mode))
    feat = feats[block_idx] if memory_mode == 'RAM' else np.load(feats[block_idx])
    if bias_value > 0.0:
        bias_column = bias_value * np.ones([feat.shape[0], 1])
        feat = np.append(feat, bias_column, axis=1)
    return feat
def get_p_block(p_matrix, block_idx, memory_mode):
    """Return one block of the precomputed P matrix (RAM entry or .npy file)."""
    if memory_mode == 'RAM':
        return p_matrix[block_idx]
    if memory_mode == 'disk':
        return np.load(p_matrix[block_idx])
    raise ValueError(
        'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
    """Precompute the P dictionnary matrix.

    Returns (p_matrix, N) where p_matrix[i] = (X^T X + N*alpha*I)^{-1} X_i^T
    (stored in RAM or on disk next to each feature file) and N is the total
    number of samples across all blocks.
    """
    first_block = get_feat_block(feats, 0, memory_mode, bias_value=bias_value)
    d = np.shape(first_block)[1]

    # Accumulate X^T X over every block, counting samples as we go.
    print('Computing xtx...')
    x_t_x = np.zeros([d, d])
    N = 0
    for block_idx in tqdm(range(len(feats))):
        x = get_feat_block(feats, block_idx, memory_mode, bias_value=bias_value)
        x_t_x += np.dot(np.transpose(x), x)
        N += x.shape[0]

    # Invert the regularized Gram matrix once, then form P block by block.
    p_matrix = []
    print('Inverting big matrix...')
    inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
    print('Computing P matrix by block...')
    for block_idx in tqdm(range(len(feats))):
        x = get_feat_block(feats, block_idx, memory_mode, bias_value=bias_value)
        sol = np.dot(inv_mat, np.transpose(x))
        if memory_mode == 'RAM':
            p_matrix.append(np.array(sol))
        else:
            path_x = feats[block_idx]
            base_path, filename = os.path.split(path_x)
            np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
            p_matrix.append(path_x)
    return p_matrix, N
def compute_weights(p_matrix, asgn, memory_mode):
    """Recompute W = sum_i P_i Y_i from scratch over all blocks."""
    d = np.shape(get_p_block(p_matrix, 0, memory_mode))[0]
    k = np.shape(asgn[0])[1]
    weights = np.zeros([d, k])
    print('Computing weights from scratch...')
    for block_idx in tqdm(range(len(p_matrix))):
        weights += np.dot(get_p_block(p_matrix, block_idx, memory_mode),
                          asgn[block_idx])
    return weights
def compute_obj(x, y, weights, n_feats):
    """Diffrac data term: (1/N) * ||X W - Y||_F^2."""
    residual = np.dot(x, weights) - y
    return 1.0 / n_feats * matrix_norm(residual, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
    """Gradient of the quadratic data term with respect to Y: (1/N)(Y - X W)."""
    prediction = np.dot(x, weights)
    return 1.0 / n_feats * (y - prediction)
def compute_gap(x,
                y,
                weights,
                n_feats,
                cstr,
                cstr_solver,
                opt_y=None,
                grad_y=None):
    """Frank-Wolfe duality gap for one block.

    When `opt_y` is not supplied, the gradient is recomputed and the LP
    oracle (`cstr_solver`) is called to obtain the linear minimizer.
    """
    if opt_y is None:
        grad_y = compute_grad(x, y, weights, n_feats)
        opt_y = cstr_solver.solve(cstr, grad_y)
    return -np.multiply(opt_y - y, grad_y).sum()
def sample_block(gaps, block_sampling):
    """Pick a block index, either uniformly or proportionally to the gaps.

    NOTE(review): an unrecognized `block_sampling` silently returns None —
    preserved from the original; confirm callers never rely on that.
    """
    if block_sampling == 'uniform':
        return np.random.randint(0, len(gaps), 1)[0]
    if block_sampling == 'gap_sampling':
        if not np.all(gaps >= 0):
            print('Warning: some gaps are negative block {}, value :{}'.format(
                gaps.argmin(), gaps.min()))
            # Clamp negative gaps so they remain (barely) samplable.
            gaps[gaps < 0] = 0.00000001
        gap_prob = gaps / gaps.sum()
        return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
                        max_iter,
                        gaps,
                        eval_metric,
                        objective_value=None,
                        verbose='silent',
                        prev_time=-1,
                        prev_global_time=-1):
    """Display information about the training (no-op unless verbose)."""
    if objective_value is None:
        objective_value = []
    if verbose not in ['normal', 'heavy']:
        return
    line = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
        iter, max_iter, gaps.sum())
    now = time.time()
    # Negative prev_* timestamps mean "not provided": skip the timing info.
    if prev_time > 0:
        line += ' ({:d} s)'.format(int(round(now - prev_time)))
    if prev_global_time > 0:
        line += ' (Glob. {:d} s)'.format(int(round(now - prev_global_time)))
    if eval_metric >= 0:
        line += ', Eval metric: {:.2f}'.format(eval_metric)
    if objective_value:
        line += ', Objective: '
        line += ','.join('{}: {:.4E}'.format(key, value)
                         for key, value in objective_value.items())
    print(line)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
    """Snapshot the current assignment of one block to `<block>_<iter>.npy`."""
    out_path = os.path.join(
        path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t))
    np.save(out_path, asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
    """Snapshot the current prediction X W of one block to `xw_<block>_<iter>.npy`."""
    out_path = os.path.join(
        path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx, t))
    np.save(out_path, np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
    """Snapshot the ground truth of one block to `<block>_gt.npy`."""
    out_path = os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx))
    np.save(out_path, gts[block_idx])
def solver(feats,
           asgn,
           cstrs,
           cstrs_solver,
           gts=None,
           eval_function=None,
           rounding_function=None,
           alpha=1e-4,
           memory_mode='RAM',
           bias_value=-1.0,
           n_iterations=10000,
           block_sampling='uniform',
           verbose='silent',
           gap_frequency=2000,
           eval_frequency=500,
           verbose_frequency=250,
           objective_frequency=250,
           path_save_asgn=None,
           validation_info=None):
    """Main solver for DiffracBCFW.

    Args:
        feats: Input features as a list (one entry per block).
        asgn: Assignment variables as a list (one entry per block). This provides
            the initialization of the system.
        cstrs: Input constraints as a dictionary (one entry per block).
        cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
            returns the LP solution.
        gts: A ground truth can be specified if you wish to evaluate your solution.
        eval_function: an eval function method can be provided.
        rounding_function: rounding function.
        alpha: Value of the regularization parameter (lambda in the paper).
        memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
        bias_value: Value to add for the bias (if negative no bias is added to the features).
        n_iterations: Number of iterations of the solver.
        block_sampling: Method for sampling block.
        verbose: `silent`, `normal`, `heavy`.
        gap_frequency: frequency to recompute all the gaps.
        eval_frequency: frequency to perform evaluation.
        verbose_frequency: frequency to print info.
        objective_frequency: frequency to compute objective (only used if positive).
        path_save_asgn: If not None save asgn at path_save_asgn. None by default.
        validation_info: If not None perform evaluation on validation

    Returns:
        (asgn, weights): the final assignments and the linear model W.
    """
    compute_objective = False
    objective_value = None
    if objective_frequency > 0:
        compute_objective = True

    save_asgn = False
    save_ids = []
    if path_save_asgn is not None:
        if not os.path.exists(path_save_asgn):
            os.makedirs(path_save_asgn)
        # Monitor evolution of asgn during optim on a subset of samples.
        save_asgn = True
        n_save_asgn = min(20, len(asgn))
        save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)

    # Pre-compute the P matrix.
    p_matrix, n_feats = compute_p_matrix(
        feats, alpha, memory_mode, bias_value=bias_value)

    # Compute W.
    weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)

    # Init the gaps.
    gaps = np.zeros(len(feats))
    print('Computing init gaps...')
    for block_idx in tqdm(range(len(feats))):
        x = get_feat_block(
            feats, block_idx, memory_mode, bias_value=bias_value)
        gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
                                      cstrs[block_idx], cstrs_solver)

        if save_asgn and block_idx in save_ids:
            save_asgn_block(path_save_asgn, block_idx, asgn, 0)
            save_xw_block(path_save_asgn, block_idx, x, weights, 0)
            save_gt_block(path_save_asgn, block_idx, gts)

    print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))

    eval_metric = -1.0
    prev_time = time.time()  # init time of iterations
    prev_global_time = prev_time
    for t in range(n_iterations):
        if eval_frequency > 0 and t % eval_frequency == 0:
            # Evaluation.
            if eval_function is not None and gts is not None:
                print('Performing evaluation...')
                eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
                                                     rounding_function, cstrs)
                if validation_info is not None:
                    gts_val = validation_info['gts']
                    feats_val = validation_info['feats']
                    eval_function.evaluate(None, gts_val, weights, feats_val,
                                           rounding_function, None)
            else:
                eval_metric = -1.0

        if compute_objective and t % objective_frequency == 0:
            print('Computing objective...')
            objective_value = {}
            # Compute the diffrac objective.
            dfrac_obj = 0.0
            # Data dependent term: 1.0 / N * ||X * W - Y||_2^2
            for block_idx in range(len(feats)):
                x = get_feat_block(
                    feats, block_idx, memory_mode, bias_value=bias_value)
                dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)

            # Regularization term: \alpha * || W ||_2^2
            dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
            objective_value['dfrac'] = dfrac_obj

        # Print information.
        if t % verbose_frequency == 0:
            display_information(t, n_iterations, gaps, eval_metric,
                                objective_value, verbose, prev_time, prev_global_time)
            prev_time = time.time()

        # Sample a block.
        block_idx = sample_block(gaps, block_sampling)
        # Compute gradient.
        x = get_feat_block(
            feats, block_idx, memory_mode, bias_value=bias_value)
        y = asgn[block_idx]

        grad_y = compute_grad(x, y, weights, n_feats)

        opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
        gaps[block_idx] = compute_gap(x, y, weights, n_feats,
                                      cstrs[block_idx], cstrs_solver,
                                      opt_y, grad_y)

        # Step size computation (closed form for the quadratic objective).
        p = get_p_block(p_matrix, block_idx, memory_mode)
        dir_y = opt_y - y
        gamma_n = gaps[block_idx]

        gamma_d = 1.0 / n_feats * np.multiply(
            dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()

        gamma = min(1.0, gamma_n / gamma_d)
        # gamma should always be positive.
        if gamma < 0:
            # Fixed: this was a Python 2 `print` statement, a SyntaxError
            # under Python 3 (the rest of the file uses print()).
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
            gamma = 0.0

        # Update variables.
        asgn[block_idx] += gamma * dir_y
        weights += gamma * np.dot(p, dir_y)

        if save_asgn and block_idx in save_ids:
            save_asgn_block(path_save_asgn, block_idx, asgn, t)
            save_xw_block(path_save_asgn, block_idx, x, weights, t)

        # Update gaps if needed.
        if (t + 1) % gap_frequency == 0:
            print('Recomputing gaps...')
            for block_idx in tqdm(range(len(feats))):
                x = get_feat_block(
                    feats, block_idx, memory_mode, bias_value=bias_value)
                gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
                                              n_feats, cstrs[block_idx],
                                              cstrs_solver)
            display_information(t, n_iterations, gaps, eval_metric,
                                objective_value, verbose)

    return asgn, weights
|
normal
|
{
"blob_id": "af02cd0778e19df7b11145c4863776a1afd1cca6",
"index": 1484,
"step-1": "\"\"\" Implements BCFW for DIFFRAC objectives. \"\"\"\n\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom numpy.linalg import norm as matrix_norm\nimport time\n\n\ndef get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):\n \"\"\"Get feature for a given block.\"\"\"\n if memory_mode == 'RAM':\n feat = feats[block_idx]\n elif memory_mode == 'disk':\n feat = np.load(feats[block_idx])\n else:\n raise ValueError(\n 'Memory mode {} is not supported.'.format(memory_mode))\n\n if bias_value > 0.0:\n feat = np.append(\n feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)\n\n return feat\n\n\ndef get_p_block(p_matrix, block_idx, memory_mode):\n if memory_mode == 'RAM':\n return p_matrix[block_idx]\n elif memory_mode == 'disk':\n return np.load(p_matrix[block_idx])\n else:\n raise ValueError(\n 'Memory mode {} is not supported.'.format(memory_mode))\n\n\ndef compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):\n \"\"\"Precompute the P dictionnary matrix.\"\"\"\n _, d = np.shape(\n get_feat_block(feats, 0, memory_mode, bias_value=bias_value))\n\n # Compute X^TX\n print('Computing xtx...')\n x_t_x = np.zeros([d, d])\n N = 0\n for i in tqdm(range(len(feats))):\n x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)\n x_t_x += np.dot(np.transpose(x), x)\n N += x.shape[0]\n\n # Compute P\n p_matrix = []\n print('Inverting big matrix...')\n inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))\n print('Computing P matrix by block...')\n for i in tqdm(range(len(feats))):\n x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)\n sol = np.dot(inv_mat, np.transpose(x))\n if memory_mode == 'RAM':\n p_matrix.append(np.array(sol))\n else:\n path_x = feats[i]\n base_path, filename = os.path.split(path_x)\n np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)\n p_matrix.append(path_x)\n\n return p_matrix, N\n\n\ndef compute_weights(p_matrix, asgn, memory_mode):\n d, _ = np.shape(get_p_block(p_matrix, 0, 
memory_mode))\n _, k = np.shape(asgn[0])\n\n weights = np.zeros([d, k])\n\n print('Computing weights from scratch...')\n for i in tqdm(range(len(p_matrix))):\n weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])\n\n return weights\n\n\ndef compute_obj(x, y, weights, n_feats):\n return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2\n\n\ndef compute_grad(x, y, weights, n_feats):\n return 1.0 / n_feats * (y - np.dot(x, weights))\n\n\ndef compute_gap(x,\n y,\n weights,\n n_feats,\n cstr,\n cstr_solver,\n opt_y=None,\n grad_y=None):\n\n # Check if we need to call the oracle.\n if opt_y is None:\n grad_y = compute_grad(x, y, weights, n_feats)\n opt_y = cstr_solver.solve(cstr, grad_y)\n\n gap = -np.multiply(opt_y - y, grad_y).sum()\n\n return gap\n\n\ndef sample_block(gaps, block_sampling):\n if block_sampling == 'uniform':\n return np.random.randint(0, len(gaps), 1)[0]\n elif block_sampling == 'gap_sampling':\n if not np.all(gaps >= 0):\n print('Warning: some gaps are negative block {}, value :{}'.format(\n gaps.argmin(), gaps.min()))\n gaps[gaps < 0] = 0.00000001\n\n gap_prob = gaps / gaps.sum()\n return np.random.choice(len(gaps), 1, p=gap_prob)[0]\n\n\ndef display_information(iter,\n max_iter,\n gaps,\n eval_metric,\n objective_value=None,\n verbose='silent',\n prev_time=-1,\n prev_global_time=-1):\n \"\"\"Display information about the training.\"\"\"\n if objective_value is None:\n objective_value = []\n\n if verbose in ['normal', 'heavy']:\n string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(\n iter, max_iter, gaps.sum())\n\n new_time = time.time()\n if prev_time > 0:\n diff_time = int(round(new_time - prev_time))\n string_display += ' ({:d} s)'.format(diff_time)\n if prev_global_time > 0:\n diff_time = int(round(new_time - prev_global_time))\n string_display += ' (Glob. 
{:d} s)'.format(diff_time)\n\n if eval_metric >= 0:\n string_display += ', Eval metric: {:.2f}'.format(eval_metric)\n\n if objective_value:\n string_display += ', Objective: '\n string_display += ','.join([\n '{}: {:.4E}'.format(key, value)\n for key, value in objective_value.items()\n ])\n\n print(string_display)\n\n\ndef save_asgn_block(path_save_asgn, block_idx, asgn, t):\n np.save(\n os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),\n asgn[block_idx])\n\n\ndef save_xw_block(path_save_asgn, block_idx, x, weights, t):\n np.save(\n os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,\n t)),\n np.dot(x, weights))\n\n\ndef save_gt_block(path_save_asgn, block_idx, gts):\n np.save(\n os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),\n gts[block_idx])\n\n\ndef solver(feats,\n asgn,\n cstrs,\n cstrs_solver,\n gts=None,\n eval_function=None,\n rounding_function=None,\n alpha=1e-4,\n memory_mode='RAM',\n bias_value=-1.0,\n n_iterations=10000,\n block_sampling='uniform',\n verbose='silent',\n gap_frequency=2000,\n eval_frequency=500,\n verbose_frequency=250,\n objective_frequency=250,\n path_save_asgn=None,\n validation_info=None):\n \"\"\"Main solver for DiffracBCFW.\n\n Args:\n feats: Input features as a list (one entry per block).\n asgn: Assignment variables as a list (one entry per block). 
This provides\n the initialization of the system.\n cstrs: Input constraints as a dictionary (one entry per block).\n cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then\n returns the LP solution.\n gts: A ground truth can be specified if you wish to evaluate your solution.\n eval_function: an eval function method can be provided.\n rounding_function: rounding function.\n alpha: Value of the regularization parameter (lambda in the paper).\n memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).\n bias_value: Value to add for the bias (if negative no bias is added to the features).\n n_iterations: Number of iterations of the solver.\n block_sampling: Method for sampling block.\n verbose: `silent`, `normal`, `heavy`.\n gap_frequency: frequency to recompute all the gaps.\n eval_frequency: frequency to perform evaluation.\n verbose_frequency: frequency to print info.\n objective_frequency: frequency to compute objective (only used if positive).\n path_save_asgn: If not None save asgn at path_save_asgn. 
None by default.\n validation_info: If not None perform evaluation on validation\n \"\"\"\n\n compute_objective = False\n objective_value = None\n if objective_frequency > 0:\n compute_objective = True\n\n save_asgn = False\n save_ids = []\n if path_save_asgn is not None:\n if not os.path.exists(path_save_asgn):\n os.makedirs(path_save_asgn)\n # Monitor evolution of asgn during optim on a subset of samples.\n save_asgn = True\n n_save_asgn = min(20, len(asgn))\n save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)\n\n # Pre-compute the P matrix.\n p_matrix, n_feats = compute_p_matrix(\n feats, alpha, memory_mode, bias_value=bias_value)\n\n # Compute W.\n weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)\n\n # Init the gaps.\n gaps = np.zeros(len(feats))\n print('Computing init gaps...')\n for block_idx in tqdm(range(len(feats))):\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,\n cstrs[block_idx], cstrs_solver)\n\n if save_asgn and block_idx in save_ids:\n save_asgn_block(path_save_asgn, block_idx, asgn, 0)\n save_xw_block(path_save_asgn, block_idx, x, weights, 0)\n save_gt_block(path_save_asgn, block_idx, gts)\n\n print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))\n\n eval_metric = -1.0\n prev_time = time.time() # init time of iterations\n prev_global_time = prev_time\n for t in range(n_iterations):\n if eval_frequency > 0 and t % eval_frequency == 0:\n # Evaluation.\n if eval_function is not None and gts is not None:\n print('Performing evaluation...')\n eval_metric = eval_function.evaluate(asgn, gts, weights, feats,\n rounding_function, cstrs)\n if validation_info is not None:\n gts_val = validation_info['gts']\n feats_val = validation_info['feats']\n eval_function.evaluate(None, gts_val, weights, feats_val,\n rounding_function, None)\n else:\n eval_metric = -1.0\n\n if compute_objective and t % 
objective_frequency == 0:\n print('Computing objective...')\n objective_value = {}\n # Compute the diffrac objective.\n dfrac_obj = 0.0\n # Data dependent term: 1.0 / N * ||X * W - Y||_2^2\n for block_idx in range(len(feats)):\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)\n\n # Regularization term: \\alpha * || W ||_2^2\n dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2\n objective_value['dfrac'] = dfrac_obj\n\n # Print information.\n if t % verbose_frequency == 0:\n display_information(t, n_iterations, gaps, eval_metric,\n objective_value, verbose, prev_time, prev_global_time)\n prev_time = time.time()\n\n # Sample a block.\n block_idx = sample_block(gaps, block_sampling)\n # Compute gradient.\n x = get_feat_block(\n feats, block_idx, memory_mode, bias_value=bias_value)\n y = asgn[block_idx]\n\n grad_y = compute_grad(x, y, weights, n_feats)\n\n opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)\n gaps[block_idx] = compute_gap(x, y, weights, n_feats,\n cstrs[block_idx], cstrs_solver,\n opt_y, grad_y)\n\n # Step size computation.\n p = get_p_block(p_matrix, block_idx, memory_mode)\n dir_y = opt_y - y\n gamma_n = gaps[block_idx]\n\n gamma_d = 1.0 / n_feats * np.multiply(\n dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()\n\n gamma = min(1.0, gamma_n / gamma_d)\n # gamma should always be positive.\n if gamma < 0:\n print 'Warning: gamma = {}, gap_i = {}'.format(\n gamma, gaps[block_idx])\n gamma = 0.0\n\n # Update variables.\n asgn[block_idx] += gamma * dir_y\n weights += gamma * np.dot(p, dir_y)\n\n if save_asgn and block_idx in save_ids:\n save_asgn_block(path_save_asgn, block_idx, asgn, t)\n save_xw_block(path_save_asgn, block_idx, x, weights, t)\n\n # Update gaps if needed.\n if (t + 1) % gap_frequency == 0:\n print('Recomputing gaps...')\n for block_idx in tqdm(range(len(feats))):\n x = get_feat_block(\n feats, block_idx, memory_mode, 
bias_value=bias_value)\n gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,\n n_feats, cstrs[block_idx],\n cstrs_solver)\n display_information(t, n_iterations, gaps, eval_metric,\n objective_value, verbose)\n\n return asgn, weights\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def merge_sort(items, temp, low, high):
if high <= low:
return None
mid = low + (high - low) // 2
merge_sort(items, temp, low, mid)
merge_sort(items, temp, mid + 1, high)
merge(items, temp, low, mid, high)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge(items, temp, low, mid, high):
    """Merge the sorted runs items[low:mid+1] and items[mid+1:high+1] in place."""
    left = low
    right = mid + 1
    for pos in range(low, high + 1):
        if left > mid:
            # Left run exhausted: drain the right run.
            temp[pos] = items[right]
            right += 1
        elif right > high:
            # Right run exhausted: drain the left run.
            temp[pos] = items[left]
            left += 1
        elif items[right] < items[left]:
            temp[pos] = items[right]
            right += 1
        else:
            # Ties take the left element, keeping the merge stable.
            temp[pos] = items[left]
            left += 1
    # Copy the merged range back into the source list.
    items[low:high + 1] = temp[low:high + 1]
def merge_sort(items, temp, low, high):
    """Sort items[low:high+1] in place, using temp as scratch space."""
    if high <= low:
        # Zero or one element: already sorted.
        return None
    mid = low + (high - low) // 2
    merge_sort(items, temp, low, mid)
    merge_sort(items, temp, mid + 1, high)
    # Combine the two sorted halves.
    merge(items, temp, low, mid, high)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def merge(items, temp, low, mid, high):
i = low
j = mid + 1
for k in range(low, high + 1):
if i > mid:
temp[k] = items[j]
j += 1
elif j > high:
temp[k] = items[i]
i += 1
elif items[j] < items[i]:
temp[k] = items[j]
j += 1
else:
temp[k] = items[i]
i += 1
for k in range(low, high + 1):
items[k] = temp[k]
def merge_sort(items, temp, low, high):
    """Sort items[low:high+1] in place, using temp as scratch space."""
    if high <= low:
        # Zero or one element: already sorted.
        return None
    mid = low + (high - low) // 2
    merge_sort(items, temp, low, mid)
    merge_sort(items, temp, mid + 1, high)
    # Combine the two sorted halves.
    merge(items, temp, low, mid, high)
if __name__ == '__main__':
    # Demo: sort a fixed list, printing it before and after.
    items = [5, 4, 3, 3, 5, 6, 4, 4, 3, 2]
    temp = [None] * len(items)
    print('정렬 전 : \t', end='')
    print(items)
    merge_sort(items, temp, 0, len(items) - 1)
    # Fixed label: '정렬 후' ("after sorting") — the original reused the
    # "before sorting" label for the sorted output.
    print('정렬 후 : \t', end='')
    print(items)
<|reserved_special_token_1|>
def merge(items, temp, low, mid, high):
    """Merge the sorted runs items[low:mid+1] and items[mid+1:high+1] into temp,
    then copy the merged range back into items."""
    i = low
    j = mid + 1
    for k in range(low, high+1):
        if i > mid:
            # Left run is exhausted:
            # every remaining right-run element comes next in order.
            temp[k] = items[j]
            # The remaining tail is already sorted, so copy it straight over.
            j += 1
        elif j > high:
            # Right run is exhausted:
            # every remaining left-run element comes next in order.
            temp[k] = items[i]
            # The remaining prefix is already sorted, so copy it as-is.
            i += 1
        elif items[j] < items[i]:
            # Right-run element is smaller:
            # place it into the merged output.
            temp[k] = items[j]
            j += 1
            # Next, compare the following right-run element.
        else:
            # Left-run element is smaller (or equal — keeps the merge stable):
            # place it into the merged output.
            temp[k] = items[i]
            i += 1
            # Next, compare the following left-run element.
    # Copy the merged result back into the original list.
    for k in range(low, high+1):
        items[k] = temp[k]
def merge_sort(items, temp, low, high):
    """Recursively sort items[low:high+1] in place, using temp as scratch."""
    if high <= low:
        # Range holds at most one element: nothing left to sort.
        return None
    mid = low + (high - low)//2
    # low, high and mid are indices into the list, not element values.
    merge_sort(items, temp, low, mid)
    merge_sort(items, temp, mid+1, high)
    merge(items, temp, low, mid, high)
if __name__ == '__main__':
    # Demo: sort a fixed list, printing it before and after.
    items = [5, 4, 3, 3, 5, 6, 4, 4, 3, 2]
    temp = [None] * len(items)
    print('정렬 전 : \t', end='')
    print(items)
    merge_sort(items, temp, 0, len(items) - 1)
    # Fixed label: '정렬 후' ("after sorting") — the original reused the
    # "before sorting" label for the sorted output.
    print('정렬 후 : \t', end='')
    print(items)
|
flexible
|
{
"blob_id": "9ab119b32ceac370b744658e5fa679292609373a",
"index": 2517,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\n<mask token>\n",
"step-3": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high + 1):\n if i > mid:\n temp[k] = items[j]\n j += 1\n elif j > high:\n temp[k] = items[i]\n i += 1\n elif items[j] < items[i]:\n temp[k] = items[j]\n j += 1\n else:\n temp[k] = items[i]\n i += 1\n for k in range(low, high + 1):\n items[k] = temp[k]\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\n<mask token>\n",
"step-4": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high + 1):\n if i > mid:\n temp[k] = items[j]\n j += 1\n elif j > high:\n temp[k] = items[i]\n i += 1\n elif items[j] < items[i]:\n temp[k] = items[j]\n j += 1\n else:\n temp[k] = items[i]\n i += 1\n for k in range(low, high + 1):\n items[k] = temp[k]\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\nif __name__ == '__main__':\n items = [5, 4, 3, 3, 5, 6, 4, 4, 3, 2]\n temp = [None] * len(items)\n print('정렬 전 : \\t', end='')\n print(items)\n merge_sort(items, temp, 0, len(items) - 1)\n print('정렬 전 : \\t', end='')\n print(items)\n",
"step-5": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high+1):\n if i > mid:\n # 왼쪽 리스트의 순회를 마쳤음\n # 남은 오른쪽 리스트의 원소들은 모두 왼쪽 리스트 원소보다 작음\n temp[k] = items[j]\n # 뒤에 나머지는 정렬되어있으니 그대로 넣기\n j += 1\n elif j > high:\n # 오른쪽 리스트의 순회를 마쳤음\n # 남은 왼쪽 리스트 원소들은 모두 오른쪽 리스트 원소보다 작음\n temp[k] = items[i]\n # 앞의 나머지는 정렬되어있으니 그대로 넣기\n i += 1\n elif items[j] < items[i]:\n # 왼쪽 리스트의 원소가 더 큰 경우\n # 오른쪽 리스트의 원소를 정렬리스트에 넣을거임\n temp[k] = items[j]\n j += 1\n # 오른쪽 리스트 다음 원소를 비교해보자\n else:\n # 왼쪽 리스트의 원소가 더 작거나 같은 경우\n # 왼쪽 리스트의 원소를 정렬리스트에 넣을거임\n temp[k] = items[i]\n i += 1\n # 왼쪽 리스트 다음 원소를 비교해라\n for k in range(low, high+1):\n items[k] = temp[k]\n # 이제 정렬해놓은거 원래 리스트로 복사해라\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n # 다 정렬했으면 이제 끝내라\n mid = low + (high - low)//2\n # low, high, mid 는 값이 아니라 index 값임\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid+1, high)\n merge(items, temp, low, mid, high)\n\nif __name__ == '__main__':\n items = [5,4,3,3,5,6,4,4,3,2]\n temp = [None]*len(items)\n print('정렬 전 : \\t', end ='')\n print(items)\n merge_sort(items, temp, 0, len(items)-1)\n print('정렬 전 : \\t', end='')\n print(items)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)
# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from osv import osv, fields
class SaleOrder(osv.osv):
    '''
    Sale Order, extended with a promo coupon code and a promotions trigger.
    '''
    _inherit = 'sale.order'

    _columns = {
        # Free-text promo coupon code entered on the order.
        'coupon_code': fields.char('Promo Coupon Code', size=20),
    }

    def apply_promotions(self, cursor, user, ids, context=None):
        """
        Applies the promotions to the given records.
        @param cursor: Database Cursor
        @param user: ID of User
        @param ids: IDs of the sale orders to process.
        @param context: OpenERP context dictionary.
        @return: True on completion (OpenERP convention).
        """
        promotions_obj = self.pool.get('promos.rules')
        for order_id in ids:
            # Fixed: propagate the caller's context instead of discarding
            # it (the original passed context=None unconditionally).
            promotions_obj.apply_promotions(cursor, user,
                                            order_id, context=context)
        return True

SaleOrder()
class SaleOrderLine(osv.osv):
    '''
    Sale Order Line, extended with a marker for promotion-generated lines.
    '''
    _inherit = "sale.order.line"

    _columns = {
        # Set by the promotions engine on lines it creates.
        'promotion_line': fields.boolean(
            "Promotion Line",
            help="Indicates if the line was created by promotions"),
    }

SaleOrderLine()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
normal
|
{
"blob_id": "d9538c030c0225c4255100da70d6bf23f550a64f",
"index": 734,
"step-1": "<mask token>\n\n\nclass SaleOrderLine(osv.osv):\n <mask token>\n _inherit = 'sale.order.line'\n _columns = {'promotion_line': fields.boolean('Promotion Line', help=\n 'Indicates if the line was created by promotions')}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SaleOrder(osv.osv):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass SaleOrderLine(osv.osv):\n \"\"\"\n Sale Order Line\n \"\"\"\n _inherit = 'sale.order.line'\n _columns = {'promotion_line': fields.boolean('Promotion Line', help=\n 'Indicates if the line was created by promotions')}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SaleOrder(osv.osv):\n <mask token>\n _inherit = 'sale.order'\n _columns = {'coupon_code': fields.char('Promo Coupon Code', size=20)}\n\n def apply_promotions(self, cursor, user, ids, context=None):\n \"\"\"\n Applies the promotions to the given records\n @param cursor: Database Cursor\n @param user: ID of User\n @param ids: ID of current record.\n @param context: Context(no direct use).\n \"\"\"\n promotions_obj = self.pool.get('promos.rules')\n for order_id in ids:\n promotions_obj.apply_promotions(cursor, user, order_id, context\n =None)\n return True\n\n\n<mask token>\n\n\nclass SaleOrderLine(osv.osv):\n \"\"\"\n Sale Order Line\n \"\"\"\n _inherit = 'sale.order.line'\n _columns = {'promotion_line': fields.boolean('Promotion Line', help=\n 'Indicates if the line was created by promotions')}\n\n\n<mask token>\n",
"step-4": "from osv import osv, fields\n\n\nclass SaleOrder(osv.osv):\n \"\"\"\n Sale Order\n \"\"\"\n _inherit = 'sale.order'\n _columns = {'coupon_code': fields.char('Promo Coupon Code', size=20)}\n\n def apply_promotions(self, cursor, user, ids, context=None):\n \"\"\"\n Applies the promotions to the given records\n @param cursor: Database Cursor\n @param user: ID of User\n @param ids: ID of current record.\n @param context: Context(no direct use).\n \"\"\"\n promotions_obj = self.pool.get('promos.rules')\n for order_id in ids:\n promotions_obj.apply_promotions(cursor, user, order_id, context\n =None)\n return True\n\n\nSaleOrder()\n\n\nclass SaleOrderLine(osv.osv):\n \"\"\"\n Sale Order Line\n \"\"\"\n _inherit = 'sale.order.line'\n _columns = {'promotion_line': fields.boolean('Promotion Line', help=\n 'Indicates if the line was created by promotions')}\n\n\nSaleOrderLine()\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>)\n# Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>\n#\n##############################################################################\nfrom osv import osv, fields\n\nclass SaleOrder(osv.osv):\n '''\n Sale Order\n '''\n _inherit = 'sale.order'\n \n _columns = {\n 'coupon_code':fields.char('Promo Coupon Code', size=20),\n }\n \n def apply_promotions(self, cursor, user, ids, context=None):\n \"\"\"\n Applies the promotions to the given records\n @param cursor: Database Cursor\n @param user: ID of User\n @param ids: ID of current record.\n @param context: Context(no direct use).\n \"\"\"\n promotions_obj = self.pool.get('promos.rules')\n for order_id in ids:\n promotions_obj.apply_promotions(cursor, user, \n order_id, context=None)\n \n return True\n \nSaleOrder()\n\n\nclass SaleOrderLine(osv.osv):\n '''\n Sale Order Line\n '''\n _inherit = \"sale.order.line\"\n \n _columns = {\n 'promotion_line':fields.boolean(\n \"Promotion Line\",\n help=\"Indicates if the line was created by promotions\"\n )\n }\nSaleOrderLine()\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:",
"step-ids": [
2,
4,
6,
9,
10
]
}
|
[
2,
4,
6,
9,
10
] |
"""
Copyright (C) 2014, Jill Huchital
"""
# test comment
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
# return render_template('index.html', greeting='here we are then')
return "index"
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods = ['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods = ['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods = ['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods = ['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods = ['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods = ['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods = ['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1,
'api_call': api_call,
'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
# debug = True makes the server restart when the Python files change. TODO: make it
# depend on whether we're running locally or in production.
ALL_DBS = connect_to_db()
# create_playlists(ALL_DBS)
app.run(debug = True)
|
normal
|
{
"blob_id": "5193de15052f81460a23d993cfa039fa90c9de5e",
"index": 897,
"step-1": "<mask token>\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n<mask token>\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n<mask token>\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == '__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-4": "<mask token>\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\nALL_DBS = None\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == '__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-5": "\"\"\"\nCopyright (C) 2014, Jill Huchital\n\"\"\"\n\n# test comment\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\n\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\n\nALL_DBS = None\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n # return render_template('index.html', greeting='here we are then')\n return \"index\"\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n@app.route('/api/1.0/create_playlists', methods = ['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n@app.route('/api/1.0/get_playlists', methods = ['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n@app.route('/api/1.0/get_all_categories', methods = ['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n@app.route('/api/1.0/get_all_topics', methods = ['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n@app.route('/api/1.0/add_category', methods = ['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n@app.route('/api/1.0/add_topic', methods = ['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n@app.route('/api/1.0/<string:api_call>', methods = ['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1,\n 'api_call': api_call,\n 
'param_2': param2}\n return jsonify(retval)\n\nif __name__ == '__main__':\n # debug = True makes the server restart when the Python files change. TODO: make it\n # depend on whether we're running locally or in production.\n ALL_DBS = connect_to_db()\n # create_playlists(ALL_DBS)\n app.run(debug = True)\n",
"step-ids": [
6,
9,
11,
13,
14
]
}
|
[
6,
9,
11,
13,
14
] |
from PIL import Image
from pdf2image import convert_from_path
import glob
from pathlib import Path
import shutil, os
from docx import Document
import fnmatch
import re
import shutil
def find_files_ignore_case(which, where='.'):
    """Return the entry names in *where* whose name matches the shell
    pattern *which*, compared case-insensitively.

    Only the directory's immediate entries are considered (no recursion).
    """
    pattern = re.compile(fnmatch.translate(which), re.IGNORECASE)
    matches = []
    for entry in os.listdir(where):
        if pattern.match(entry):
            matches.append(entry)
    return matches
def crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):
    """Crop the image at *file* in place and overwrite it.

    NOTE(review): the right/bottom box edges are computed as
    ``width - crop_left - crop_right`` and ``height - crop_top - crop_bottom``,
    not the symmetric ``width - crop_right`` / ``height - crop_bottom``.
    The call site is tuned to this formula (it passes ``crop_right=-40``),
    so it is preserved exactly -- confirm intent before changing.
    """
    image = Image.open(file)
    width, height = image.size
    right_edge = width - crop_left - crop_right
    bottom_edge = height - crop_top - crop_bottom
    cropped = image.crop((crop_left, crop_top, right_edge, bottom_edge))
    cropped.save(file)
def create_empty_folder(path):
    """Create the folder *path* (parents included) and delete any files in it.

    Bug fix: the original called
    ``find_files_ignore_case(os.path.join(path, '*'))``, passing the joined
    path as the *pattern* while the search directory defaulted to ``'.'`` --
    so existing files were never matched -- and then called ``os.remove(ef)``
    on the bare name without the folder prefix.  This version lists the
    folder itself and removes each regular file by its full path.
    Subdirectories are left alone (the original never matched them either).
    """
    Path(path).mkdir(parents=True, exist_ok=True)
    # Remove existing files so a re-run starts from a clean folder.
    for name in os.listdir(path):
        full = os.path.join(path, name)
        if os.path.isfile(full):
            os.remove(full)
def convert_pdf_to_images(file):
    """Render each page of the PDF *file* to a JPEG named ``Z00001.jpg``,
    ``Z00002.jpg``, ... inside a folder named after the PDF (extension
    stripped), and return that folder's path.

    The folder is created fresh (existing files removed) before rendering.
    """
    # One output folder per PDF, named like the PDF itself.
    folder, _ = os.path.splitext(file)
    create_empty_folder(folder)

    # Render the PDF pages and write them as numbered JPEGs.
    page_number = 0
    for page in convert_from_path(file):
        page_number += 1
        target = os.path.join(folder, 'Z{:05}.jpg'.format(page_number))
        page.save(target, 'JPEG')

    return folder
def get_file_name_prefix(filename):
    """Return the first prefix listed in ``file_name_prefixes.txt`` that
    *filename* starts with (case-insensitive), or ``None`` if none match.

    The prefixes file is read from the current working directory, one
    prefix per line; surrounding whitespace on each line is ignored.
    """
    lowered = filename.lower()
    with open('file_name_prefixes.txt') as prefixes:
        for raw_line in prefixes:
            candidate = raw_line.strip()
            if lowered.startswith(candidate.lower()):
                return candidate
    return None
import errno, os, stat, shutil
def handleRemoveReadonly(func, path, exc):
    """``shutil.rmtree`` onerror callback: clear read-only bits and retry.

    *exc* is the ``(type, value, traceback)`` triple rmtree passes to its
    onerror handler.  When the failed call was ``os.rmdir``/``os.remove``
    and the error was EACCES, make *path* fully writable (mode 0777) and
    retry the operation; otherwise re-raise the pending exception.
    """
    error = exc[1]
    retryable = func in (os.rmdir, os.remove) and error.errno == errno.EACCES
    if not retryable:
        raise
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
    func(path)
if __name__ == '__main__':
    # Pipeline over every *.pdf in the current working directory:
    #   1. render each PDF's pages to JPEGs in a same-named folder
    #   2. crop each rendered page image
    #   3. copy matching reference JPEGs (by filename prefix) into the folder
    #   4. assemble the folder's images into a .docx, seeded from a
    #      reference template when one exists
    #   5. delete the temporary image folder
    cur_folder = os.path.abspath('')
    # Convert PDFs to Images
    print('Convert PDFs to images...')
    files = find_files_ignore_case('*.pdf')
    for pdf_file in files:
        pdf_file = os.path.join(cur_folder, pdf_file)
        print(pdf_file)
        folder = convert_pdf_to_images(pdf_file)
    # Crop images
    print('Crop images...')
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print(folder)
        images = find_files_ignore_case('*.jpg', folder)
        images.sort()
        print(images)
        for image_file in images:
            try:
                image_file = os.path.join(folder, image_file)
                # NOTE(review): crop_right=-40 *widens* the right edge under
                # crop_image_center's box formula -- presumably deliberate
                # tuning; confirm before adjusting either side.
                crop_image_center(image_file, crop_left=160,
                    crop_right=-40, crop_top=100, crop_bottom=20)
            except:
                # Best-effort: a page that fails to crop is left as rendered.
                pass
    # Copy Image *.jpg From Reference to Folder
    files = find_files_ignore_case('*.pdf')
    for file in files:
        print(file)
        folder = os.path.splitext(file)[0]
        file_prefix = get_file_name_prefix(file)
        print(file_prefix)
        # Copy Image *.jpg From Reference to Folder
        # NOTE(review): if no prefix matched, file_prefix is None and the
        # pattern becomes 'None*.jpg' -- harmlessly matches nothing.
        source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix), 'Reference')
        for f in source_files:
            f = os.path.join('Reference', f)
            shutil.copy(f, folder)
    # Insert Images to Word
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        word_file = folder+".docx"
        # Copy from template docx
        file_prefix = get_file_name_prefix(file)
        # NOTE(review): this rebinds the outer `files` (the PDF list) to the
        # template-docx list. The enclosing for-loop keeps working only
        # because its iterator was created from the original list; avoid
        # reusing the name if this block is ever refactored.
        files = find_files_ignore_case('{}*.docx'.format(file_prefix), 'Reference')
        print(file, file_prefix, files)
        if files:
            # Seed the document from the first matching reference template,
            # then start a new section for the page images.
            document = Document(os.path.join('Reference', files[0]))
            document.add_section()
        else:
            document = Document()
        document.save(word_file)
        section = document.sections[0]
        # width = section.page_width - section.left_margin - section.right_margin
        # Scale pictures to the full printable page height.
        height = section.page_height - section.top_margin - section.bottom_margin
        images = find_files_ignore_case('*.jpg', folder)
        for image_file in images:
            image_file = os.path.join(folder, image_file)
            # document.add_picture(image_file, width=width)
            document.add_picture(image_file, height=height)
        document.save(word_file)
    # Delete folders including its images
    files = find_files_ignore_case('*.pdf')
    for file in files:
        folder = os.path.splitext(file)[0]
        print('Deleting', folder, os.path.isdir(folder))
        try:
            files_in_dir = os.listdir(folder)
            # NOTE(review): the inner loop variable shadows the outer `file`;
            # iteration still proceeds correctly, but `file` no longer names
            # the PDF after this point.
            for file in files_in_dir: # loop to delete each file in folder
                os.remove(os.path.join(folder,file))
            #os.rmdir(folder)
            # handleRemoveReadonly retries EACCES failures after chmod 0777.
            shutil.rmtree(folder, ignore_errors=False, onerror=handleRemoveReadonly)
        except Exception as ex:
            print('Error deleting', folder, ex)
|
normal
|
{
"blob_id": "a9876c61578a53f29865062c0915db622aaaba72",
"index": 6916,
"step-1": "<mask token>\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\n<mask token>\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\n<mask token>\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n print('Convert PDFs to images...')\n files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n print('Crop 
images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, crop_right=-40,\n crop_top=100, crop_bottom=20)\n except:\n pass\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),\n 'Reference')\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n word_file = folder + '.docx'\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix),\n 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n section = document.sections[0]\n height = (section.page_height - section.top_margin - section.\n bottom_margin)\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in images:\n image_file = os.path.join(folder, image_file)\n document.add_picture(image_file, height=height)\n document.save(word_file)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder)\n for file in files_in_dir:\n os.remove(os.path.join(folder, file))\n shutil.rmtree(folder, ignore_errors=False, onerror=\n handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n",
"step-4": "from PIL import Image\nfrom pdf2image import convert_from_path\nimport glob\nfrom pathlib import Path\nimport shutil, os\nfrom docx import Document\nimport fnmatch\nimport re\nimport shutil\n\n\ndef find_files_ignore_case(which, where='.'):\n \"\"\"Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.\"\"\"\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top -\n crop_bottom)\n crop = img.crop(box)\n crop.save(file)\n\n\ndef create_empty_folder(path):\n \"\"\"Create a folder. Delete content if exists\"\"\"\n Path(path).mkdir(parents=True, exist_ok=True)\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\n\ndef convert_pdf_to_images(file):\n \"\"\"Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n \"\"\"\n folder = os.path.splitext(file)[0]\n create_empty_folder(folder)\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i + 1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n return folder\n\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\n\nimport errno, os, stat, shutil\n\n\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n\n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n print('Convert PDFs to images...')\n 
files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n print('Crop images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, crop_right=-40,\n crop_top=100, crop_bottom=20)\n except:\n pass\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix),\n 'Reference')\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n word_file = folder + '.docx'\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix),\n 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n section = document.sections[0]\n height = (section.page_height - section.top_margin - section.\n bottom_margin)\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in images:\n image_file = os.path.join(folder, image_file)\n document.add_picture(image_file, height=height)\n document.save(word_file)\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder)\n for file in files_in_dir:\n os.remove(os.path.join(folder, file))\n shutil.rmtree(folder, 
ignore_errors=False, onerror=\n handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n",
"step-5": "from PIL import Image\nfrom pdf2image import convert_from_path\nimport glob \nfrom pathlib import Path\nimport shutil, os\nfrom docx import Document\nimport fnmatch\nimport re\nimport shutil\n\n\ndef find_files_ignore_case(which, where='.'):\n '''Returns list of filenames from `where` path matched by 'which'\n shell pattern. Matching is case-insensitive.'''\n \n # TODO: recursive param with walk() filtering\n rule = re.compile(fnmatch.translate(which), re.IGNORECASE)\n return [name for name in os.listdir(where) if rule.match(name)]\n\n\ndef crop_image_center(file, crop_left, crop_right, crop_top, crop_bottom):\n img = Image.open(file)\n x, y = img.size\n box = (crop_left, crop_top, x - crop_left - crop_right, y - crop_top - crop_bottom)\n crop = img.crop(box) \n crop.save(file)\n\ndef create_empty_folder(path):\n '''Create a folder. Delete content if exists'''\n Path(path).mkdir(parents=True, exist_ok=True)\n \n # Remove existing files\n existing_files = find_files_ignore_case(os.path.join(path, '*'))\n for ef in existing_files:\n os.remove(ef)\n\ndef convert_pdf_to_images(file):\n '''Convert a PDF file into images and save to folder of same name\n Return folder which contains the images\n '''\n # Create directory for each file\n folder = os.path.splitext(file)[0] \n create_empty_folder(folder)\n \n # Convert PDF to images into the directory\n images = convert_from_path(file)\n for i, image in enumerate(images):\n file_name = 'Z{:05}.jpg'.format(i+1)\n image.save(os.path.join(folder, file_name), 'JPEG')\n\n return folder\n\ndef get_file_name_prefix(filename):\n with open('file_name_prefixes.txt') as f:\n for line in f:\n line = line.strip()\n if filename.lower().startswith(line.lower()):\n return line.strip()\n return None\n\nimport errno, os, stat, shutil\ndef handleRemoveReadonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 
0777\n func(path)\n else:\n raise\n \n\nif __name__ == '__main__':\n cur_folder = os.path.abspath('')\n \n # Convert PDFs to Images\n print('Convert PDFs to images...')\n files = find_files_ignore_case('*.pdf')\n for pdf_file in files:\n pdf_file = os.path.join(cur_folder, pdf_file)\n print(pdf_file)\n folder = convert_pdf_to_images(pdf_file)\n \n # Crop images\n print('Crop images...')\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print(folder)\n images = find_files_ignore_case('*.jpg', folder)\n images.sort()\n print(images)\n for image_file in images:\n try:\n image_file = os.path.join(folder, image_file)\n crop_image_center(image_file, crop_left=160, \n crop_right=-40, crop_top=100, crop_bottom=20)\n except:\n pass\n \n # Copy Image *.jpg From Reference to Folder\n files = find_files_ignore_case('*.pdf')\n for file in files:\n print(file)\n folder = os.path.splitext(file)[0]\n file_prefix = get_file_name_prefix(file)\n print(file_prefix)\n\n # Copy Image *.jpg From Reference to Folder\n source_files = find_files_ignore_case('{}*.jpg'.format(file_prefix), 'Reference')\n\n for f in source_files:\n f = os.path.join('Reference', f)\n shutil.copy(f, folder)\n \n # Insert Images to Word\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0] \n word_file = folder+\".docx\"\n\n # Copy from template docx\n file_prefix = get_file_name_prefix(file)\n files = find_files_ignore_case('{}*.docx'.format(file_prefix), 'Reference')\n print(file, file_prefix, files)\n if files:\n document = Document(os.path.join('Reference', files[0]))\n document.add_section()\n else:\n document = Document()\n document.save(word_file)\n\n section = document.sections[0]\n # width = section.page_width - section.left_margin - section.right_margin\n height = section.page_height - section.top_margin - section.bottom_margin\n\n images = find_files_ignore_case('*.jpg', folder)\n for image_file in 
images:\n image_file = os.path.join(folder, image_file)\n # document.add_picture(image_file, width=width)\n document.add_picture(image_file, height=height)\n\n document.save(word_file)\n \n # Delete folders including its images\n files = find_files_ignore_case('*.pdf')\n for file in files:\n folder = os.path.splitext(file)[0]\n print('Deleting', folder, os.path.isdir(folder))\n try:\n files_in_dir = os.listdir(folder) \n for file in files_in_dir: # loop to delete each file in folder\n os.remove(os.path.join(folder,file))\n #os.rmdir(folder)\n shutil.rmtree(folder, ignore_errors=False, onerror=handleRemoveReadonly)\n except Exception as ex:\n print('Error deleting', folder, ex)\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import tkinter as tk
import tkinter.ttk as ttk
import GUIForm
import sys
def main():
global window
global _form
print('You are using Python {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor, sys.version_info.micro))
window = tk.Tk()
GUIForm.BuildInterface(window)
window.mainloop()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import tkinter as tk
import tkinter.ttk as ttk
import GUIForm
import sys
def main():
    """Print the interpreter version, build the Tk GUI, and run the event loop."""
    global window
    global _form
    print("You are using Python {}.{}.{}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro))
    window=tk.Tk()
    GUIForm.BuildInterface(window)
    window.mainloop()
# Entry point when run as a script.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "ca5057a5fdfef0edf4cf0c3ff3e2a371907ca4ee",
"index": 1270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n global window\n global _form\n print('You are using Python {}.{}.{}'.format(sys.version_info.major,\n sys.version_info.minor, sys.version_info.micro))\n window = tk.Tk()\n GUIForm.BuildInterface(window)\n window.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n global window\n global _form\n print('You are using Python {}.{}.{}'.format(sys.version_info.major,\n sys.version_info.minor, sys.version_info.micro))\n window = tk.Tk()\n GUIForm.BuildInterface(window)\n window.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import tkinter as tk\nimport tkinter.ttk as ttk\nimport GUIForm\nimport sys\n\n\ndef main():\n global window\n global _form\n print('You are using Python {}.{}.{}'.format(sys.version_info.major,\n sys.version_info.minor, sys.version_info.micro))\n window = tk.Tk()\n GUIForm.BuildInterface(window)\n window.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import tkinter as tk\nimport tkinter.ttk as ttk\nimport GUIForm\nimport sys\n\ndef main():\n global window\n global _form\n\n print(\"You are using Python {}.{}.{}\".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro))\n\n window=tk.Tk()\n GUIForm.BuildInterface(window)\n window.mainloop()\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import math
import random
from numpy.linalg import inv
from scipy.optimize import minimize
from Util import to_vector
class TS_RLR:
    """Thompson Sampling with regularized logistic regression (contextual bandit).

    Articles are d-dimensional feature vectors. A Bayesian logistic model with
    a diagonal Gaussian posterior (mean ``mu``, per-dimension precisions ``q``)
    is refit by numerical optimization every ``batch_size`` impressions, and
    article selection samples a weight vector from that posterior.
    """
    def __init__(self, alpha):
        # alpha: prior precision on the weights (larger = stronger pull to 0).
        self.d = 6                      # article feature dimension
        self.k = 6
        self.alpha = alpha
        self.batch_size = 1000          # impressions between posterior refits
        self.training_size = 1000       # warm-up impressions before sampling starts
        self.impressions = 0
        self.batch_ids = list([])       # row indices of articles shown this batch
        self.batch_clicks = np.array([])
        self.articles_1_d = np.array([])  # stacked article vectors, reshaped to (n, d)
        self.article_ids = dict()       # article id -> row index in articles_1_d
        self.bad_articles = set()       # ids whose feature line failed to parse
        self.mu = np.zeros(self.d)      # posterior mean of the weights
        self.q = self.alpha * np.ones(self.d)  # posterior diagonal precisions
    def sigmoid(self, x):
        """Logistic function 1 / (1 + e^-x)."""
        return 1.0 / (1.0 + math.exp(-x))
    def add_new_article(self, line):
        """Parse one article line ("<id> <features...>") and register it.

        Returns the article id, or -1 if the line previously failed (or now
        fails) to parse into a feature vector.
        """
        article_id = int(line.split(" ")[0])
        if article_id in self.bad_articles:
            return -1
        if article_id not in self.article_ids:
            try:
                article = to_vector(line)
            except IndexError:
                # Malformed line: remember the id so it is never retried.
                self.bad_articles.add(article_id)
                return -1
            self.article_ids[article_id] = len(self.article_ids)
            self.articles_1_d = np.append(self.articles_1_d, article).reshape([len(self.article_ids), self.d])
        return article_id
    def to_minimize(self, w):
        """Negative log-posterior of weights w: Gaussian prior term plus the
        logistic log-loss over the current batch (reads self.batch_articles,
        which update() must have set up before calling the optimizer)."""
        return 1/2 * sum (self.q * (w - self.mu) * (w - self.mu)) + sum(np.log(1+np.exp(-self.batch_clicks * w.dot(self.batch_articles))))
    def update(self, user, selected_article, click):
        """Record one (article, click) observation; refit every batch_size.

        NOTE(review): the optimum is stored in self.m, but self.mu (used in
        the prior term and for posterior sampling in select()) is never
        updated after __init__ -- confirm whether that is intentional.
        """
        self.impressions += 1
        self.batch_ids.append(self.article_ids[selected_article])
        self.batch_clicks = np.append(self.batch_clicks, click)
        if self.impressions % self.batch_size == 0:
            # Random starting point for the optimizer.
            w = np.random.normal(0, 1, self.d)
            # NOTE(review): reshape([d, batch_size]) reinterprets the
            # (batch, d) rows rather than transposing them -- verify that
            # .T was not intended here.
            self.batch_articles = self.articles_1_d[self.batch_ids].reshape([self.d, self.batch_size])
            res = minimize(self.to_minimize, w, method='nelder-mead', options={'xtol': 1e-8, 'disp': False})
            self.m = res.x
            # Predicted click probabilities for the batch under the new weights.
            p = 1/(1 + np.exp(- self.m.dot(self.batch_articles)))
            # Laplace-style precision update. NOTE(review): p has batch_size
            # entries but is indexed by the feature index i (< d) -- confirm.
            for i in np.arange(0, self.d):
                self.q[i] += sum(self.batch_articles[i] * self.batch_articles[i] * p[i] * (1-p[i]))
            self.batch_ids = list([])
            self.batch_clicks = np.array([])
    def warmup(self, file):
        """No-op; warm-up handling happens inline in select()."""
        pass
    def select(self, user, pre_selected_article, lines, total_impressions, click):
        """Choose an article for this impression.

        During the first training_size impressions the logged
        pre_selected_article is echoed back (warmup=True) while articles are
        registered and update() is fed. Afterwards a weight vector is
        Thompson-sampled from N(mu, diag(1/q)) and a random argmax of the
        sampled click probabilities is returned.

        Returns (selected_article_id, warmup_flag).
        """
        selected_article = -1
        warmup = False
        if self.impressions < self.training_size:
            for line in lines:
                self.add_new_article(line)
            self.update(user, pre_selected_article, click)
            selected_article = pre_selected_article
            warmup = True
        else:
            best_value = 0
            best_value_articles = list()
            sample_w = np.random.multivariate_normal(self.mu, np.diag(1/self.q))
            for line in lines:
                article_id = self.add_new_article(line)
                if article_id == -1 :
                    continue
                a_id = self.article_ids[article_id]
                article = self.articles_1_d[a_id]
                cur_value = self.sigmoid(sample_w.dot(article))
                if cur_value > best_value:
                    best_value_articles = list([article_id])
                    best_value = cur_value
                elif cur_value == best_value:
                    best_value_articles.append(article_id)
            # Break ties uniformly at random.
            index = random.randint(0, len(best_value_articles)-1)
            selected_article = best_value_articles[index]
        return selected_article, warmup
|
normal
|
{
"blob_id": "49df9db508637ce5914aa6591178a03c609b6bc7",
"index": 659,
"step-1": "<mask token>\n\n\nclass TS_RLR:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n <mask token>\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n <mask token>\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = False\n if self.impressions < self.training_size:\n for line in lines:\n 
self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-3": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n <mask token>\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n\n def warmup(self, file):\n pass\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = False\n if self.impressions < self.training_size:\n 
for line in lines:\n self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-4": "<mask token>\n\n\nclass TS_RLR:\n\n def __init__(self, alpha):\n self.d = 6\n self.k = 6\n self.alpha = alpha\n self.batch_size = 1000\n self.training_size = 1000\n self.impressions = 0\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n self.articles_1_d = np.array([])\n self.article_ids = dict()\n self.bad_articles = set()\n self.mu = np.zeros(self.d)\n self.q = self.alpha * np.ones(self.d)\n\n def sigmoid(self, x):\n return 1.0 / (1.0 + math.exp(-x))\n\n def add_new_article(self, line):\n article_id = int(line.split(' ')[0])\n if article_id in self.bad_articles:\n return -1\n if article_id not in self.article_ids:\n try:\n article = to_vector(line)\n except IndexError:\n self.bad_articles.add(article_id)\n return -1\n self.article_ids[article_id] = len(self.article_ids)\n self.articles_1_d = np.append(self.articles_1_d, article).reshape([\n len(self.article_ids), self.d])\n return article_id\n\n def to_minimize(self, w):\n return 1 / 2 * sum(self.q * (w - self.mu) * (w - self.mu)) + sum(np\n .log(1 + np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\n def update(self, user, selected_article, click):\n self.impressions += 1\n self.batch_ids.append(self.article_ids[selected_article])\n self.batch_clicks = np.append(self.batch_clicks, click)\n if self.impressions % self.batch_size == 0:\n w = np.random.normal(0, 1, self.d)\n self.batch_articles = self.articles_1_d[self.batch_ids].reshape([\n self.d, self.batch_size])\n res = minimize(self.to_minimize, w, method='nelder-mead',\n options={'xtol': 1e-08, 'disp': False})\n self.m = res.x\n p = 1 / (1 + np.exp(-self.m.dot(self.batch_articles)))\n for i in np.arange(0, self.d):\n self.q[i] += sum(self.batch_articles[i] * self.\n batch_articles[i] * p[i] * (1 - p[i]))\n self.batch_ids = list([])\n self.batch_clicks = np.array([])\n\n def warmup(self, file):\n pass\n\n def select(self, user, pre_selected_article, lines, total_impressions,\n click):\n selected_article = -1\n warmup = 
False\n if self.impressions < self.training_size:\n for line in lines:\n self.add_new_article(line)\n self.update(user, pre_selected_article, click)\n selected_article = pre_selected_article\n warmup = True\n else:\n best_value = 0\n best_value_articles = list()\n sample_w = np.random.multivariate_normal(self.mu, np.diag(1 /\n self.q))\n for line in lines:\n article_id = self.add_new_article(line)\n if article_id == -1:\n continue\n a_id = self.article_ids[article_id]\n article = self.articles_1_d[a_id]\n cur_value = self.sigmoid(sample_w.dot(article))\n if cur_value > best_value:\n best_value_articles = list([article_id])\n best_value = cur_value\n elif cur_value == best_value:\n best_value_articles.append(article_id)\n index = random.randint(0, len(best_value_articles) - 1)\n selected_article = best_value_articles[index]\n return selected_article, warmup\n",
"step-5": "import numpy as np\nimport math\nimport random\nfrom numpy.linalg import inv\nfrom scipy.optimize import minimize\n\nfrom Util import to_vector\n\nclass TS_RLR:\n\t\n\tdef __init__(self, alpha):\n\t\tself.d = 6\n\t\tself.k = 6\n\n\t\tself.alpha = alpha\n\t\tself.batch_size = 1000\n\t\tself.training_size = 1000\n\t\tself.impressions = 0\n\t\tself.batch_ids = list([])\n\t\tself.batch_clicks = np.array([])\n\n\t\tself.articles_1_d = np.array([])\n\t\tself.article_ids = dict()\n\t\tself.bad_articles = set()\n\n\t\tself.mu = np.zeros(self.d)\n\t\tself.q = self.alpha * np.ones(self.d)\n\t\t\n\tdef sigmoid(self, x):\n\t\t# print(x)\n\t\treturn 1.0 / (1.0 + math.exp(-x))\n\n\tdef add_new_article(self, line):\n\t\tarticle_id = int(line.split(\" \")[0])\n\t\t\t\n\t\tif article_id in self.bad_articles:\n\t\t\treturn -1\n\n\t\tif article_id not in self.article_ids:\n\t\t\ttry:\n\t\t\t\tarticle = to_vector(line)\n\t\t\texcept IndexError:\n\t\t\t\tself.bad_articles.add(article_id)\n\t\t\t\treturn -1\n\t\t\t\n\t\t\tself.article_ids[article_id] = len(self.article_ids)\n\t\t\tself.articles_1_d = np.append(self.articles_1_d, article).reshape([len(self.article_ids), self.d])\n\t\t\t\n\t\treturn article_id\n\n\tdef to_minimize(self, w):\n\t\treturn 1/2 * sum (self.q * (w - self.mu) * (w - self.mu)) + sum(np.log(1+np.exp(-self.batch_clicks * w.dot(self.batch_articles))))\n\t\n\n\tdef update(self, user, selected_article, click):\n\t\tself.impressions += 1\n\t\tself.batch_ids.append(self.article_ids[selected_article])\n\t\tself.batch_clicks = np.append(self.batch_clicks, click)\n\n\t\tif self.impressions % self.batch_size == 0:\n\t\t\tw = np.random.normal(0, 1, self.d)\n\t\t\tself.batch_articles = self.articles_1_d[self.batch_ids].reshape([self.d, self.batch_size])\n\n\t\t\tres = minimize(self.to_minimize, w, method='nelder-mead', options={'xtol': 1e-8, 'disp': False})\n\t\t\tself.m = res.x\n\t\t\t\n\t\t\tp = 1/(1 + np.exp(- self.m.dot(self.batch_articles)))\n\n\t\t\tfor i in 
np.arange(0, self.d):\n\t\t\t\t\tself.q[i] += sum(self.batch_articles[i] * self.batch_articles[i] * p[i] * (1-p[i]))\n\t\t\t\t\n\t\t\tself.batch_ids = list([])\n\t\t\tself.batch_clicks = np.array([])\n\n\t\n\tdef warmup(self, file):\n\t\tpass\n\n\tdef select(self, user, pre_selected_article, lines, total_impressions, click):\n\t\tselected_article = -1\n\t\twarmup = False\n\t\t\n\t\tif self.impressions < self.training_size:\n\t\t\tfor line in lines:\n\t\t\t\tself.add_new_article(line)\n\n\t\t\tself.update(user, pre_selected_article, click)\n\t\t\tselected_article = pre_selected_article\n\t\t\twarmup = True\n\t\t\n\t\telse:\n\t\t\tbest_value = 0\n\t\t\tbest_value_articles = list()\n\n\t\t\tsample_w = np.random.multivariate_normal(self.mu, np.diag(1/self.q))\n\t\t\tfor line in lines:\n\t\t\t\tarticle_id = self.add_new_article(line)\n\t\t\t\tif article_id == -1 : \n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ta_id = self.article_ids[article_id]\n\t\t\t\tarticle = self.articles_1_d[a_id]\n\t\t\t\t\n\t\t\t\tcur_value = self.sigmoid(sample_w.dot(article))\n\t\t\n\t\t\t\tif cur_value > best_value:\n\t\t\t\t\tbest_value_articles = list([article_id])\n\t\t\t\t\tbest_value = cur_value\n\t\t\t\telif cur_value == best_value:\n\t\t\t\t\tbest_value_articles.append(article_id)\n\n\t\t\tindex = random.randint(0, len(best_value_articles)-1)\t\n\t\t\tselected_article = best_value_articles[index]\n\n\t\treturn selected_article, warmup\n\n\n\n\n\n",
"step-ids": [
1,
6,
7,
8,
10
]
}
|
[
1,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
def find_prev_hash(array, current_hash):
return array[array.index(current_hash) - 1]
<|reserved_special_token_0|>
def go_to_register(conn):
skip_intro(conn)
skip_question(conn)
conn.sendline('r')
def extract_hash_id():
conn = remote(TARGET, PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed), current_hash)
conn.sendline(prev_hash)
if 'Yep!' in conn.recvline():
conn.close()
return hash_id, seed
conn.close()
return None
def main():
hash_list = []
for i in range(1000):
try:
hash_list.append(extract_hash_id())
except:
time.sleep(1)
continue
with open('hash.txt', 'a') as fp:
for tup in hash_list:
fp.write('{} {}\n'.format(tup[0], tup[1]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
def find_prev_hash(array, current_hash):
return array[array.index(current_hash) - 1]
def skip_intro(conn):
for i in range(6):
conn.recvline()
def skip_question(conn):
for i in range(4):
conn.recvline()
def go_to_register(conn):
skip_intro(conn)
skip_question(conn)
conn.sendline('r')
def extract_hash_id():
conn = remote(TARGET, PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed), current_hash)
conn.sendline(prev_hash)
if 'Yep!' in conn.recvline():
conn.close()
return hash_id, seed
conn.close()
return None
def main():
hash_list = []
for i in range(1000):
try:
hash_list.append(extract_hash_id())
except:
time.sleep(1)
continue
with open('hash.txt', 'a') as fp:
for tup in hash_list:
fp.write('{} {}\n'.format(tup[0], tup[1]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LIMIT = 500
TARGET = 'shell2017.picoctf.com'
PORT = 46290
FILE = 'hash.txt'
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
def find_prev_hash(array, current_hash):
return array[array.index(current_hash) - 1]
def skip_intro(conn):
for i in range(6):
conn.recvline()
def skip_question(conn):
for i in range(4):
conn.recvline()
def go_to_register(conn):
skip_intro(conn)
skip_question(conn)
conn.sendline('r')
def extract_hash_id():
conn = remote(TARGET, PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed), current_hash)
conn.sendline(prev_hash)
if 'Yep!' in conn.recvline():
conn.close()
return hash_id, seed
conn.close()
return None
def main():
hash_list = []
for i in range(1000):
try:
hash_list.append(extract_hash_id())
except:
time.sleep(1)
continue
with open('hash.txt', 'a') as fp:
for tup in hash_list:
fp.write('{} {}\n'.format(tup[0], tup[1]))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import md5
from pwn import *
import time
LIMIT = 500
TARGET = 'shell2017.picoctf.com'
PORT = 46290
FILE = 'hash.txt'
def generate_hashes(seed):
a = []
current_hash = seed
for i in range(1000):
current_hash = md5.new(current_hash).hexdigest()
a.append(current_hash)
return a
def find_prev_hash(array, current_hash):
return array[array.index(current_hash) - 1]
def skip_intro(conn):
for i in range(6):
conn.recvline()
def skip_question(conn):
for i in range(4):
conn.recvline()
def go_to_register(conn):
skip_intro(conn)
skip_question(conn)
conn.sendline('r')
def extract_hash_id():
conn = remote(TARGET, PORT)
go_to_register(conn)
id_and_seed = conn.recvline().split()
hash_id = id_and_seed[7].rstrip('\n')
seed = id_and_seed[-1].rstrip('\n')
conn.recvline()
current_hash = conn.recvline().rstrip('\n')
prev_hash = find_prev_hash(generate_hashes(seed), current_hash)
conn.sendline(prev_hash)
if 'Yep!' in conn.recvline():
conn.close()
return hash_id, seed
conn.close()
return None
def main():
hash_list = []
for i in range(1000):
try:
hash_list.append(extract_hash_id())
except:
time.sleep(1)
continue
with open('hash.txt', 'a') as fp:
for tup in hash_list:
fp.write('{} {}\n'.format(tup[0], tup[1]))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/python2
import hashlib
import md5
import time

from pwn import *
LIMIT = 500
TARGET = "shell2017.picoctf.com"
PORT = 46290
FILE = "hash.txt"
def generate_hashes(seed):
    """Return the chain of 1000 successive MD5 hex digests seeded by *seed*.

    chain[0] = md5(seed); chain[i] = md5(chain[i-1]) for i > 0.

    Uses hashlib instead of the Python-2-only, long-deprecated ``md5``
    module, so the function works identically on Python 2 and 3 (the
    inputs are ASCII hex strings, so .encode() is lossless).
    """
    chain = []
    current_hash = seed
    for _ in range(1000):
        current_hash = hashlib.md5(current_hash.encode()).hexdigest()
        chain.append(current_hash)
    return chain
def find_prev_hash(array, current_hash):
    """Return the element immediately preceding *current_hash* in *array*.

    If current_hash is the first element, Python's -1 indexing wraps
    around and the last element is returned.
    """
    position = array.index(current_hash)
    return array[position - 1]
def skip_intro(conn):
    """Drain the six banner lines the service prints when the connection opens."""
    for _ in range(6):
        conn.recvline()
def skip_question(conn):
    """Drain the four lines of question text before the menu prompt."""
    for _ in range(4):
        conn.recvline()
def go_to_register(conn):
    """Skip the intro and question prompts, then choose 'r' (register)."""
    # The service prints intro + question text that must be consumed with
    # recvline() before the menu will accept input.
    skip_intro(conn)
    skip_question(conn)
    conn.sendline("r")
def extract_hash_id():
    """Register once against the remote service and recover its (id, seed).

    Returns the (hash_id, seed) tuple on a validated registration,
    None otherwise. The connection is closed in both cases.
    """
    conn = remote(TARGET, PORT)
    go_to_register(conn)

    # The registration line contains the hash id at field 7 and the seed last.
    fields = conn.recvline().split()
    hash_id = fields[7].rstrip('\n')
    seed = fields[-1].rstrip('\n')

    conn.recvline()
    current_hash = conn.recvline().rstrip('\n')

    # The service expects the pre-image of current_hash in the MD5 chain.
    conn.sendline(find_prev_hash(generate_hashes(seed), current_hash))

    # "Yep!" appears in the response when the submitted hash validates.
    result = (hash_id, seed) if "Yep!" in conn.recvline() else None
    conn.close()
    return result
def main():
    """Harvest (hash_id, seed) pairs from the service and append them to FILE.

    Fixes over the original:
    - extract_hash_id() returns None on a failed registration; those results
      are now skipped instead of being appended (the original would raise
      TypeError on ``tup[0]`` for a None entry in the write loop).
    - the bare ``except:`` is narrowed to ``except Exception`` so that
      KeyboardInterrupt/SystemExit still stop the 1000-iteration loop.
    - the output path uses the module constant FILE instead of a duplicate
      hard-coded "hash.txt" literal.
    """
    hash_list = []
    for _ in range(1000):
        try:
            result = extract_hash_id()
            if result is not None:
                hash_list.append(result)
        except Exception:
            # Transient network/parse failure: back off briefly and retry.
            time.sleep(1)
            continue
    with open(FILE, "a") as fp:
        for hash_id, seed in hash_list:
            fp.write("{} {}\n".format(hash_id, seed))

    # I went back into the code to use this function whenever I found a match
    # in my hash text file:
    # print(find_prev_hash(generate_hashes("ead81fe8cfe9fda9e4c2093e17e4d024"),"58cb392a127b699c6f22f228e23ae73e"))

if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "5e78992df94cbbe441495b7d8fb80104ec000748",
"index": 6728,
"step-1": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\n<mask token>\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\n<mask token>\n",
"step-3": "<mask token>\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import md5\nfrom pwn import *\nimport time\nLIMIT = 500\nTARGET = 'shell2017.picoctf.com'\nPORT = 46290\nFILE = 'hash.txt'\n\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n for i in range(1000):\n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n return a\n\n\ndef find_prev_hash(array, current_hash):\n return array[array.index(current_hash) - 1]\n\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\n\ndef go_to_register(conn):\n skip_intro(conn)\n skip_question(conn)\n conn.sendline('r')\n\n\ndef extract_hash_id():\n conn = remote(TARGET, PORT)\n go_to_register(conn)\n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n prev_hash = find_prev_hash(generate_hashes(seed), current_hash)\n conn.sendline(prev_hash)\n if 'Yep!' in conn.recvline():\n conn.close()\n return hash_id, seed\n conn.close()\n return None\n\n\ndef main():\n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open('hash.txt', 'a') as fp:\n for tup in hash_list:\n fp.write('{} {}\\n'.format(tup[0], tup[1]))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python2\nimport md5 \nfrom pwn import *\nimport time\n\nLIMIT = 500\nTARGET = \"shell2017.picoctf.com\"\nPORT = 46290\nFILE = \"hash.txt\"\n\ndef generate_hashes(seed):\n a = []\n current_hash = seed\n \n for i in range(1000): \n current_hash = md5.new(current_hash).hexdigest()\n a.append(current_hash)\n \n return a\n\ndef find_prev_hash(array,current_hash):\n return array[array.index(current_hash)-1]\n\ndef skip_intro(conn):\n for i in range(6):\n conn.recvline()\n\ndef skip_question(conn):\n for i in range(4):\n conn.recvline()\n\ndef go_to_register(conn):\n #there's a bunch of crap that needs to be skipped for recvline()\n skip_intro(conn)\n skip_question(conn)\n conn.sendline(\"r\")\n \ndef extract_hash_id():\n conn = remote(TARGET,PORT) \n go_to_register(conn)\n \n id_and_seed = conn.recvline().split()\n hash_id = id_and_seed[7].rstrip('\\n')\n seed = id_and_seed[-1].rstrip('\\n')\n\n conn.recvline()\n current_hash = conn.recvline().rstrip('\\n')\n \n prev_hash = find_prev_hash(generate_hashes(seed),current_hash)\n conn.sendline(prev_hash)\n \n #Yep is in the validated hash, so we will use this as the success metric\n if \"Yep!\" in conn.recvline():\n conn.close()\n return (hash_id, seed)\n conn.close()\n return None\n \ndef main(): \n hash_list = []\n for i in range(1000):\n try:\n hash_list.append(extract_hash_id())\n except:\n time.sleep(1)\n continue\n with open(\"hash.txt\",\"a\") as fp:\n for tup in hash_list:\n fp.write(\"{} {}\\n\".format(tup[0],tup[1])) \n\n #I went back into the code to use this function whenever I found a match in my hash text file\n # print(find_prev_hash(generate_hashes(\"ead81fe8cfe9fda9e4c2093e17e4d024\"),\"58cb392a127b699c6f22f228e23ae73e\"))\n \nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
7,
9,
10,
11
]
}
|
[
5,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
from flask import Flask, request, render_template, redirect
import os
import smtplib
from email.message import EmailMessage
app = Flask(__name__)
EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
|
flexible
|
{
"blob_id": "27d9e6a868cfc18780ec9615e8dbc3b5ea2fd0c3",
"index": 1399,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = Flask(__name__)\nEMAIL_ADDRESS = os.environ.get('EMAIL_USER')\nEMAIL_PASSWORD = os.environ.get('EMAIL_PASS')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "from flask import Flask, request, render_template, redirect\nimport os\nimport smtplib\nfrom email.message import EmailMessage\napp = Flask(__name__)\nEMAIL_ADDRESS = os.environ.get('EMAIL_USER')\nEMAIL_PASSWORD = os.environ.get('EMAIL_PASS')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row('ok')
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,
'confirm'), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.
addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %
msg.from_user.first_name, reply_markup=markups.welcome(),
parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call
.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp='Выбор валюты')
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id, 'currency'),
reply_markup=markups.currency())
<|reserved_special_token_0|>
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text='English',
callback_data='langeng')
bt_rus = telebot.types.InlineKeyboardButton(text='Русский',
callback_data='langrus')
bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',
callback_data='langukr')
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=
markups.payments())
<|reserved_special_token_0|>
@bot.message_handler(regexp='Кошелёк')
def wallet(msg):
bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),
reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',
msg.chat.id, msg.message_id, reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.
message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg = call.message
bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg
.message_id, reply_markup=markups.withdraw())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.
message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups
.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, 'Курс')
@bot.message_handler(regexp='Настройки')
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),
reply_markup=markups.settings())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')
def lang(call):
chat = call.message.chat
bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,
reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,
msg.message_id, reply_markup=markups.rate())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row('ok')
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,
'confirm'), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.
addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %
msg.from_user.first_name, reply_markup=markups.welcome(),
parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call
.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp='Выбор валюты')
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id, 'currency'),
reply_markup=markups.currency())
<|reserved_special_token_0|>
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text='English',
callback_data='langeng')
bt_rus = telebot.types.InlineKeyboardButton(text='Русский',
callback_data='langrus')
bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',
callback_data='langukr')
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
@bot.message_handler(regexp='Назад')
def back(msg):
bot.send_message(msg.chat.id, 'Операции покупки или продажи',
reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),
reply_markup=markups.menu())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=
markups.payments())
<|reserved_special_token_0|>
@bot.message_handler(regexp='Кошелёк')
def wallet(msg):
bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),
reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',
msg.chat.id, msg.message_id, reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.
message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg = call.message
bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg
.message_id, reply_markup=markups.withdraw())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.
message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups
.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, 'Курс')
@bot.message_handler(regexp='Настройки')
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),
reply_markup=markups.settings())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')
def lang(call):
chat = call.message.chat
bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,
reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,
msg.message_id, reply_markup=markups.rate())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@bot.message_handler(commands=['start'])
def start(message):
chat = message.chat
msg = bot.send_message(chat.id, 'Select a language in the list',
reply_markup=markups.language())
bot.register_next_step_handler(msg, llanguage)
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row('ok')
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,
'confirm'), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.
addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %
msg.from_user.first_name, reply_markup=markups.welcome(),
parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call
.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp='Выбор валюты')
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id, 'currency'),
reply_markup=markups.currency())
<|reserved_special_token_0|>
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text='English',
callback_data='langeng')
bt_rus = telebot.types.InlineKeyboardButton(text='Русский',
callback_data='langrus')
bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',
callback_data='langukr')
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'requests')
def my_requests(call):
text = base.get_text(call.message.chat.id, 'no_req')
bot.edit_message_text(text, call.message.chat.id, call.message.message_id)
bot.edit_message_reply_markup(call.message.chat.id, call.message.
message_id, reply_markup=markups.add_request(call.message.chat.id))
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
@bot.message_handler(regexp='Назад')
def back(msg):
bot.send_message(msg.chat.id, 'Операции покупки или продажи',
reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),
reply_markup=markups.menu())
@bot.message_handler(regexp='Обменные операции')
def exchange(msg):
bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.
exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),
reply_markup=markups.exchangeI())
@bot.callback_query_handler(func=lambda call: call.data == 'buy')
def buy(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),
reply_markup=markups.buyI_sellI())
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=
markups.payments())
@bot.callback_query_handler(func=lambda call: call.data == 'sell')
def sell(call):
chat = call.message.chat
bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),
reply_markup=markups.buyI_sellI())
@bot.message_handler(regexp='Кошелёк')
def wallet(msg):
bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),
reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',
msg.chat.id, msg.message_id, reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.
message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg = call.message
bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg
.message_id, reply_markup=markups.withdraw())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')
def wwithdraw(call):
msg = call.message
bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.
chat.id, msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'my requests')
def user_requests(call):
bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.
message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups
.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, 'Курс')
@bot.message_handler(regexp='Настройки')
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),
reply_markup=markups.settings())
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')
def lang(call):
chat = call.message.chat
bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,
reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,
msg.message_id, reply_markup=markups.rate())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')
def burses(call):
number_of_burse = call.data[5:]
msg = call.message
markup = telebot.types.InlineKeyboardMarkup()
bt_back_to_rates = telebot.types.InlineKeyboardButton(text=
'Вернуться к выбору биржы', callback_data='rate')
markup.add(bt_back_to_rates)
bot.edit_message_text(
'Для пары BTC/RUB теперь используются котировки биржи ...название...',
msg.chat.id, msg.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'address')
def address_cur(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.address())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
<|reserved_special_token_0|>
@bot.message_handler(regexp='О сервисе')
def service(msg):
bot.send_message(msg.chat.id, 'Нужно придумать')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import base
import telebot
import markups
from starter import start_bot, bot
@bot.message_handler(commands=['start'])
def start(message):
chat = message.chat
msg = bot.send_message(chat.id, 'Select a language in the list',
reply_markup=markups.language())
bot.register_next_step_handler(msg, llanguage)
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row('ok')
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,
'confirm'), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.
addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %
msg.from_user.first_name, reply_markup=markups.welcome(),
parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call
.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp='Выбор валюты')
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id, 'currency'),
reply_markup=markups.currency())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')
def currency(call):
current_currency = call.data[4:]
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text='English',
callback_data='langeng')
bt_rus = telebot.types.InlineKeyboardButton(text='Русский',
callback_data='langrus')
bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',
callback_data='langukr')
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'lang')
def lan(call):
chat = call.message.chat
new_lan = call.data[4:]
bot.edit_message_text('Вы выбрали язык', chat.id, call.message.
message_id, reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'requests')
def my_requests(call):
text = base.get_text(call.message.chat.id, 'no_req')
bot.edit_message_text(text, call.message.chat.id, call.message.message_id)
bot.edit_message_reply_markup(call.message.chat.id, call.message.
message_id, reply_markup=markups.add_request(call.message.chat.id))
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
@bot.message_handler(regexp='Назад')
def back(msg):
bot.send_message(msg.chat.id, 'Операции покупки или продажи',
reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),
reply_markup=markups.menu())
@bot.message_handler(regexp='Обменные операции')
def exchange(msg):
bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.
exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),
reply_markup=markups.exchangeI())
@bot.callback_query_handler(func=lambda call: call.data == 'buy')
def buy(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),
reply_markup=markups.buyI_sellI())
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
chat = call.message.chat
bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=
markups.payments())
@bot.callback_query_handler(func=lambda call: call.data == 'sell')
def sell(call):
chat = call.message.chat
bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())
bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),
reply_markup=markups.buyI_sellI())
@bot.message_handler(regexp='Кошелёк')
def wallet(msg):
bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),
reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
msg = call.message
bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',
msg.chat.id, msg.message_id, reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
msg = call.message
bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.
message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
msg = call.message
bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg
.message_id, reply_markup=markups.withdraw())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')
def wwithdraw(call):
msg = call.message
bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.
chat.id, msg.message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'my requests')
def user_requests(call):
bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
cur = call.data[4:]
msg = call.message
bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.
message_id, reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
msg = call.message
ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups
.pay_method())
bot.register_next_step_handler(ms, rate)
def rate(msg):
bot.send_message(msg.chat.id, 'Курс')
@bot.message_handler(regexp='Настройки')
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),
reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'settings')
def setings(call):
msg = call.message
bot.edit_message_text(base.get_text(msg.chat.id, 'settings'), msg.chat.
id, msg.message_id, reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')
def lang(call):
chat = call.message.chat
bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,
reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
msg = call.message
bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,
msg.message_id, reply_markup=markups.rate())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')
def burses(call):
number_of_burse = call.data[5:]
msg = call.message
markup = telebot.types.InlineKeyboardMarkup()
bt_back_to_rates = telebot.types.InlineKeyboardButton(text=
'Вернуться к выбору биржы', callback_data='rate')
markup.add(bt_back_to_rates)
bot.edit_message_text(
'Для пары BTC/RUB теперь используются котировки биржи ...название...',
msg.chat.id, msg.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'address')
def address_cur(call):
msg = call.message
bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,
reply_markup=markups.address())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
msg = call.message
mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)
bot.register_next_step_handler(mes, enter_address)
def enter_address(msg):
new_address = msg
bot.send_message(msg.chat.id, 'Информация сохранена')
@bot.message_handler(regexp='О сервисе')
def service(msg):
bot.send_message(msg.chat.id, 'Нужно придумать')
if __name__ == '__main__':
bot.polling()
<|reserved_special_token_1|>
import base
import telebot
import markups
from starter import start_bot, bot
@bot.message_handler(commands=['start'])
def start(message):
chat = message.chat
# welcome(msg)
msg = bot.send_message(chat.id, "Select a language in the list", reply_markup=markups.language())
bot.register_next_step_handler(msg, llanguage)
# base.create_user(chat.id)
def llanguage(msg):
chat = msg.chat
base.create_user(msg.chat.id, msg.text)
markup = telebot.types.ReplyKeyboardMarkup(True, True)
markup.row("ok")
str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"confirm"), reply_markup=markup)
bot.register_next_step_handler(str, welcome)
def welcome(msg):
bot.send_message(msg.chat.id, "Чат-поддержка", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') % msg.from_user.first_name,
reply_markup=markups.welcome(), parse_mode='html')
@bot.callback_query_handler(func=lambda call: call.data == 'currency')
def select_currency(call):
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'currency'), chat.id, call.message.message_id, reply_markup=markups.currency())
@bot.message_handler(regexp="Выбор валюты")
def select_currency(msg):
chat = msg.chat
bot.send_message(chat.id, base.get_text(chat.id,'currency'), reply_markup=markups.currency())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')
def currency(call):
current_currency = call.data[4:] # Выбранная валюта
chat = call.message.chat
bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,
call.message.message_id, reply_markup=markups.menu())
def langg():
markup = telebot.types.InlineKeyboardMarkup()
bt_eng = telebot.types.InlineKeyboardButton(text="English", callback_data="langeng")
bt_rus = telebot.types.InlineKeyboardButton(text="Русский", callback_data="langrus")
bt_ukr = telebot.types.InlineKeyboardButton(text="Украiнський", callback_data="langukr")
markup.add(bt_eng)
markup.add(bt_rus)
markup.add(bt_ukr)
return markup
@bot.callback_query_handler(func=lambda call: call.data[:4] == "lang")
def lan(call):
chat = call.message.chat
new_lan = call.data[4:]
bot.edit_message_text( "Вы выбрали язык",chat.id,call.message.message_id,reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'requests')
def my_requests(call):
    """Show the 'no requests' text and attach the add-request keyboard."""
    chat_id = call.message.chat.id
    message_id = call.message.message_id
    bot.edit_message_text(base.get_text(chat_id, 'no_req'), chat_id, message_id)
    bot.edit_message_reply_markup(chat_id, message_id,
                                  reply_markup=markups.add_request(chat_id))
@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')
def currency(call):
    """Return from a sub-screen to the operations menu (edits in place)."""
    message = call.message
    bot.edit_message_text(
        base.get_text(message.chat.id, 'operations'),
        message.chat.id,
        message.message_id,
        reply_markup=markups.menu(),
    )
@bot.message_handler(regexp="Назад")
def back(msg):
bot.send_message(msg.chat.id, "Операции покупки или продажи", reply_markup=markups.addWelcome())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"operations"), reply_markup=markups.menu())
@bot.message_handler(regexp="Обменные операции")
def exchange(msg):
bot.send_message(msg.chat.id, "Купить/Продать", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,"exchamge"), reply_markup=markups.exchangeI())
@bot.callback_query_handler(func=lambda call: call.data == 'buy')
def buy(call):
    """Start the purchase flow: reply keyboard plus currency choice."""
    chat_id = call.message.chat.id
    bot.send_message(chat_id, "Покупка", reply_markup=markups.exchangeR())
    bot.send_message(chat_id, base.get_text(chat_id, 'buycur'), reply_markup=markups.buyI_sellI())
@bot.callback_query_handler(func=lambda call: call.data == 'monero')
def monero(call):
    """Show the payment-method keyboard for Monero deals."""
    chat_id = call.message.chat.id
    bot.send_message(chat_id, "Покупка/Продажа Monero", reply_markup=markups.payments())
@bot.callback_query_handler(func=lambda call: call.data == 'sell')
def sell(call):
    """Start the sale flow: reply keyboard plus currency choice."""
    chat_id = call.message.chat.id
    bot.send_message(chat_id, "Продажа", reply_markup=markups.exchangeR())
    bot.send_message(chat_id, base.get_text(chat_id, 'sellcur'), reply_markup=markups.buyI_sellI())
@bot.message_handler(regexp="Кошелёк")
def wallet(msg):
bot.send_message(msg.chat.id, "Кошелёк", reply_markup=markups.exchangeR())
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'wallet'), reply_markup=markups.wallet())
@bot.callback_query_handler(func=lambda call: call.data == 'bringin')
def bring_in(call):
    """Ask which currency account a deposit should go to."""
    message = call.message
    bot.edit_message_text("Выберете валюту на счёт которой придут бабосы",
                          message.chat.id, message.message_id,
                          reply_markup=markups.bringin())
@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')
def bbring(call):
    """Prompt the user to deposit the chosen currency (code after 'bbring')."""
    currency_code = call.data[6:]
    bot.edit_message_text("Внесите " + currency_code,
                          call.message.chat.id, call.message.message_id)
@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')
def withdraw(call):
    """Ask which currency a withdrawal should be taken from."""
    message = call.message
    bot.edit_message_text("С какой валюты списать бобосы",
                          message.chat.id, message.message_id,
                          reply_markup=markups.withdraw())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')
def wwithdraw(call):
    """Prompt for a withdrawal amount in the chosen currency (code after 'wwith')."""
    currency_code = call.data[5:]
    bot.edit_message_text("Введите сколько вывести" + currency_code,
                          call.message.chat.id, call.message.message_id)
@bot.callback_query_handler(func=lambda call: call.data == "my requests")
def user_requests(call):
bot.send_message(call.message.chat.id, "Если нужно,то просто раскомменти")
# markup = telebot.types.InlineKeyboardMarkup()
# data = base.get_user_requests(call.message.chat.id)
# val = base.get_user_value(call.message.chat.id)
# if not data:
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("У вас нет объявлений", call.message.chat.id, call.message.message_id)
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,
# reply_markup=markup)
#
#
# else:
# for each in data:
# btn = telebot.types.InlineKeyboardButton(
# text=each.rType + ", " + each.paymentMethod + ", " + each.rate + " " + each.currency,
# callback_data=each.currency + "->" + each.rid)
# markup.row(btn)
# btn_add = telebot.types.InlineKeyboardButton("📝 Добавить объявление", callback_data='add request')
# back = telebot.types.InlineKeyboardButton(text="Назад",
# callback_data='exchange')
# markup.row(btn_add, back)
# bot.edit_message_text("Что-то там про объявления",
# call.message.chat.id, call.message.message_id, parse_mode="markdown")
# bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'add request')
def add_request(call):
    """Start creating a new request: ask for the currency first."""
    message = call.message
    bot.edit_message_text("Выберите валюту", message.chat.id, message.message_id,
                          reply_markup=markups.request_curr())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')
def req_cur(call):
    """After a currency is picked for a new request, ask for the request type."""
    # NOTE(review): the chosen currency (call.data[4:]) was assigned to an
    # unused local and is discarded — confirm whether it should be carried
    # into the request being built.
    msg = call.message
    bot.edit_message_text("Выберите тип объявления", msg.chat.id, msg.message_id,
                          reply_markup=markups.request_type())
@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')
@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')
def req_buy(call):
    """Ask for a payment method (for both buy and sell requests), then go to rate."""
    prompt = bot.send_message(call.message.chat.id, "Метод оплаты",
                              reply_markup=markups.pay_method())
    bot.register_next_step_handler(prompt, rate)
def rate(msg):
    """Next step after the payment method: ask for the rate."""
    chat_id = msg.chat.id
    bot.send_message(chat_id, "Курс")
@bot.message_handler(regexp="Настройки")
def settings(msg):
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'settings'), reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == 'settings')
def setings(call):
    """Open the settings screen from an inline button (edits in place)."""
    message = call.message
    bot.edit_message_text(base.get_text(message.chat.id, 'settings'),
                          message.chat.id, message.message_id,
                          reply_markup=markups.settings())
@bot.callback_query_handler(func=lambda call: call.data == "chooselanguage")
def lang(call):
chat = call.message.chat
bot.edit_message_text( "Выберите язык",chat.id,call.message.message_id, reply_markup=langg())
@bot.callback_query_handler(func=lambda call: call.data == 'rate')
def rat(call):
    """Ask which exchange to use as the rate source."""
    message = call.message
    bot.edit_message_text("Выберите источник актульного курса",
                          message.chat.id, message.message_id,
                          reply_markup=markups.rate())
@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')
def burses(call):
    """Confirm the exchange choice and offer a way back to the rate picker."""
    # NOTE(review): the chosen exchange id (call.data[5:]) was assigned to an
    # unused local and is discarded, and the confirmation text is a stub —
    # confirm what should actually be stored/shown here.
    msg = call.message
    markup = telebot.types.InlineKeyboardMarkup()
    markup.add(telebot.types.InlineKeyboardButton(text="Вернуться к выбору биржы",
                                                  callback_data='rate'))
    bot.edit_message_text("Для пары BTC/RUB теперь используются котировки биржи ...название...",
                          msg.chat.id, msg.message_id, reply_markup=markup)
@bot.callback_query_handler(func=lambda call: call.data == 'address')
def address_cur(call):
    """Ask for which currency the user wants to set an address."""
    message = call.message
    bot.edit_message_text("Выберите валюту", message.chat.id, message.message_id,
                          reply_markup=markups.address())
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')
def address(call):
    """Prompt for an address and route the user's reply to enter_address."""
    message = call.message
    prompt = bot.edit_message_text("Введите адрес", message.chat.id, message.message_id)
    bot.register_next_step_handler(prompt, enter_address)
def enter_address(msg):
    """Receive the address typed by the user and acknowledge it."""
    # NOTE(review): the original stored the whole Message object in an unused
    # local ('new_address = msg'); the address text (msg.text) is never
    # persisted anywhere — confirm where it should be saved.
    bot.send_message(msg.chat.id, "Информация сохранена")
@bot.message_handler(regexp="О сервисе")
def service(msg):
bot.send_message(msg.chat.id,"Нужно придумать")
if __name__ == "__main__":
bot.polling()
# start_bot()
|
flexible
|
{
"blob_id": "7cc77de31adff5b4a394f117fc743cd6dd4bc06c",
"index": 6065,
"step-1": "<mask token>\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n<mask 
token>\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n 
reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n 
reply_markup=markups.menu())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n<mask token>\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n 
.pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n msg = bot.send_message(chat.id, 'Select a language in the list',\n reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n<mask token>\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n 
bot.edit_message_reply_markup(call.message.chat.id, call.message.\n message_id, reply_markup=markups.add_request(call.message.chat.id))\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Обменные операции')\ndef exchange(msg):\n bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.\n exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),\n reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: 
call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg = call.message\n bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.\n chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'my requests')\ndef user_requests(call):\n bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n 
bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\n 'Вернуться к выбору биржы', callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\n 'Для пары BTC/RUB теперь используются котировки биржи ...название...',\n msg.chat.id, msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef address_cur(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\n<mask token>\n\n\n@bot.message_handler(regexp='О сервисе')\ndef service(msg):\n bot.send_message(msg.chat.id, 'Нужно придумать')\n\n\n<mask token>\n",
"step-4": "import base\nimport telebot\nimport markups\nfrom starter import start_bot, bot\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n msg = bot.send_message(chat.id, 'Select a language in the list',\n reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row('ok')\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\n 'confirm'), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, 'Чат-поддержка', reply_markup=markups.\n addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') %\n msg.from_user.first_name, reply_markup=markups.welcome(),\n parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'currency'), chat.id, call\n .message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp='Выбор валюты')\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id, 'currency'),\n reply_markup=markups.currency())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')\ndef currency(call):\n current_currency = call.data[4:]\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text='English',\n callback_data='langeng')\n bt_rus = telebot.types.InlineKeyboardButton(text='Русский',\n callback_data='langrus')\n bt_ukr = telebot.types.InlineKeyboardButton(text='Украiнський',\n callback_data='langukr')\n 
markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'lang')\ndef lan(call):\n chat = call.message.chat\n new_lan = call.data[4:]\n bot.edit_message_text('Вы выбрали язык', chat.id, call.message.\n message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n bot.edit_message_reply_markup(call.message.chat.id, call.message.\n message_id, reply_markup=markups.add_request(call.message.chat.id))\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id, 'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Назад')\ndef back(msg):\n bot.send_message(msg.chat.id, 'Операции покупки или продажи',\n reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'operations'),\n reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp='Обменные операции')\ndef exchange(msg):\n bot.send_message(msg.chat.id, 'Купить/Продать', reply_markup=markups.\n exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'exchamge'),\n reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'buycur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Покупка/Продажа Monero', reply_markup=\n 
markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, 'Продажа', reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id, 'sellcur'),\n reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp='Кошелёк')\ndef wallet(msg):\n bot.send_message(msg.chat.id, 'Кошелёк', reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'wallet'),\n reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text('Выберете валюту на счёт которой придут бабосы',\n msg.chat.id, msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text('Внесите ' + call.data[6:], msg.chat.id, msg.\n message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg = call.message\n bot.edit_message_text('С какой валюты списать бобосы', msg.chat.id, msg\n .message_id, reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg = call.message\n bot.edit_message_text('Введите сколько вывести' + call.data[5:], msg.\n chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'my requests')\ndef user_requests(call):\n bot.send_message(call.message.chat.id, 'Если нужно,то просто раскомменти')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = 
call.data[4:]\n msg = call.message\n bot.edit_message_text('Выберите тип объявления', msg.chat.id, msg.\n message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, 'Метод оплаты', reply_markup=markups\n .pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, 'Курс')\n\n\n@bot.message_handler(regexp='Настройки')\ndef settings(msg):\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'settings'),\n reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'settings')\ndef setings(call):\n msg = call.message\n bot.edit_message_text(base.get_text(msg.chat.id, 'settings'), msg.chat.\n id, msg.message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'chooselanguage')\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text('Выберите язык', chat.id, call.message.message_id,\n reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text('Выберите источник актульного курса', msg.chat.id,\n msg.message_id, reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\n 'Вернуться к выбору биржы', callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\n 'Для пары BTC/RUB теперь используются котировки биржи ...название...',\n msg.chat.id, msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef 
address_cur(call):\n msg = call.message\n bot.edit_message_text('Выберите валюту', msg.chat.id, msg.message_id,\n reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text('Введите адрес', msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\ndef enter_address(msg):\n new_address = msg\n bot.send_message(msg.chat.id, 'Информация сохранена')\n\n\n@bot.message_handler(regexp='О сервисе')\ndef service(msg):\n bot.send_message(msg.chat.id, 'Нужно придумать')\n\n\nif __name__ == '__main__':\n bot.polling()\n",
"step-5": "import base\nimport telebot\nimport markups\nfrom starter import start_bot, bot\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n chat = message.chat\n # welcome(msg)\n msg = bot.send_message(chat.id, \"Select a language in the list\", reply_markup=markups.language())\n bot.register_next_step_handler(msg, llanguage)\n # base.create_user(chat.id)\n\n\ndef llanguage(msg):\n chat = msg.chat\n base.create_user(msg.chat.id, msg.text)\n markup = telebot.types.ReplyKeyboardMarkup(True, True)\n markup.row(\"ok\")\n str = bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"confirm\"), reply_markup=markup)\n bot.register_next_step_handler(str, welcome)\n\n\ndef welcome(msg):\n bot.send_message(msg.chat.id, \"Чат-поддержка\", reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id, 'welcome_inf') % msg.from_user.first_name,\n reply_markup=markups.welcome(), parse_mode='html')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'currency')\ndef select_currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'currency'), chat.id, call.message.message_id, reply_markup=markups.currency())\n\n\n@bot.message_handler(regexp=\"Выбор валюты\")\ndef select_currency(msg):\n chat = msg.chat\n bot.send_message(chat.id, base.get_text(chat.id,'currency'), reply_markup=markups.currency())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'ccur')\ndef currency(call):\n current_currency = call.data[4:] # Выбранная валюта\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\ndef langg():\n markup = telebot.types.InlineKeyboardMarkup()\n bt_eng = telebot.types.InlineKeyboardButton(text=\"English\", callback_data=\"langeng\")\n bt_rus = telebot.types.InlineKeyboardButton(text=\"Русский\", callback_data=\"langrus\")\n bt_ukr = 
telebot.types.InlineKeyboardButton(text=\"Украiнський\", callback_data=\"langukr\")\n markup.add(bt_eng)\n markup.add(bt_rus)\n markup.add(bt_ukr)\n return markup\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == \"lang\")\ndef lan(call):\n chat = call.message.chat\n new_lan = call.data[4:]\n bot.edit_message_text( \"Вы выбрали язык\",chat.id,call.message.message_id,reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'requests')\ndef my_requests(call):\n text = base.get_text(call.message.chat.id, 'no_req')\n bot.edit_message_text(text, call.message.chat.id, call.message.message_id)\n bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,\n reply_markup=markups.add_request(call.message.chat.id))\n\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'backtomenu')\ndef currency(call):\n chat = call.message.chat\n bot.edit_message_text(base.get_text(chat.id,'operations'), chat.id,\n call.message.message_id, reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp=\"Назад\")\ndef back(msg):\n bot.send_message(msg.chat.id, \"Операции покупки или продажи\", reply_markup=markups.addWelcome())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"operations\"), reply_markup=markups.menu())\n\n\n@bot.message_handler(regexp=\"Обменные операции\")\ndef exchange(msg):\n bot.send_message(msg.chat.id, \"Купить/Продать\", reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,\"exchamge\"), reply_markup=markups.exchangeI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'buy')\ndef buy(call):\n chat = call.message.chat\n bot.send_message(chat.id, \"Покупка\", reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id,'buycur'), reply_markup=markups.buyI_sellI())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'monero')\ndef monero(call):\n chat = call.message.chat\n 
bot.send_message(chat.id, \"Покупка/Продажа Monero\", reply_markup=markups.payments())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'sell')\ndef sell(call):\n chat = call.message.chat\n bot.send_message(chat.id, \"Продажа\", reply_markup=markups.exchangeR())\n bot.send_message(chat.id, base.get_text(chat.id,'sellcur'), reply_markup=markups.buyI_sellI())\n\n\n@bot.message_handler(regexp=\"Кошелёк\")\ndef wallet(msg):\n bot.send_message(msg.chat.id, \"Кошелёк\", reply_markup=markups.exchangeR())\n bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'wallet'), reply_markup=markups.wallet())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'bringin')\ndef bring_in(call):\n msg = call.message\n bot.edit_message_text(\"Выберете валюту на счёт которой придут бабосы\", msg.chat.id,\n msg.message_id, reply_markup=markups.bringin())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:6] == 'bbring')\ndef bbring(call):\n msg = call.message\n bot.edit_message_text(\"Внесите \" + call.data[6:], msg.chat.id, msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'withdraw')\ndef withdraw(call):\n msg=call.message\n bot.edit_message_text(\"С какой валюты списать бобосы\",msg.chat.id,msg.message_id,reply_markup=markups.withdraw())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'wwith')\ndef wwithdraw(call):\n msg=call.message\n bot.edit_message_text(\"Введите сколько вывести\" + call.data[5:],msg.chat.id,msg.message_id)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"my requests\")\ndef user_requests(call):\n bot.send_message(call.message.chat.id, \"Если нужно,то просто раскомменти\")\n # markup = telebot.types.InlineKeyboardMarkup()\n # data = base.get_user_requests(call.message.chat.id)\n # val = base.get_user_value(call.message.chat.id)\n # if not data:\n # btn_add = telebot.types.InlineKeyboardButton(\"📝 Добавить объявление\", callback_data='add request')\n # back = 
telebot.types.InlineKeyboardButton(text=\"Назад\",\n # callback_data='exchange')\n # markup.row(btn_add, back)\n # bot.edit_message_text(\"У вас нет объявлений\", call.message.chat.id, call.message.message_id)\n # bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id,\n # reply_markup=markup)\n #\n #\n # else:\n # for each in data:\n # btn = telebot.types.InlineKeyboardButton(\n # text=each.rType + \", \" + each.paymentMethod + \", \" + each.rate + \" \" + each.currency,\n # callback_data=each.currency + \"->\" + each.rid)\n # markup.row(btn)\n # btn_add = telebot.types.InlineKeyboardButton(\"📝 Добавить объявление\", callback_data='add request')\n # back = telebot.types.InlineKeyboardButton(text=\"Назад\",\n # callback_data='exchange')\n # markup.row(btn_add, back)\n # bot.edit_message_text(\"Что-то там про объявления\",\n # call.message.chat.id, call.message.message_id, parse_mode=\"markdown\")\n # bot.edit_message_reply_markup(call.message.chat.id, call.message.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'add request')\ndef add_request(call):\n msg = call.message\n bot.edit_message_text(\"Выберите валюту\", msg.chat.id, msg.message_id, reply_markup=markups.request_curr())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'rreq')\ndef req_cur(call):\n cur = call.data[4:]\n msg = call.message\n bot.edit_message_text(\"Выберите тип объявления\", msg.chat.id, msg.message_id, reply_markup=markups.request_type())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'reqsell')\n@bot.callback_query_handler(func=lambda call: call.data == 'reqbuy')\ndef req_buy(call):\n msg = call.message\n ms = bot.send_message(msg.chat.id, \"Метод оплаты\", reply_markup=markups.pay_method())\n bot.register_next_step_handler(ms, rate)\n\n\ndef rate(msg):\n bot.send_message(msg.chat.id, \"Курс\")\n\n\n@bot.message_handler(regexp=\"Настройки\")\ndef settings(msg):\n 
bot.send_message(msg.chat.id, base.get_text(msg.chat.id,'settings'), reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'settings')\ndef setings(call):\n msg = call.message\n bot.edit_message_text(base.get_text(msg.chat.id,'settings'), msg.chat.id, msg.message_id, reply_markup=markups.settings())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"chooselanguage\")\ndef lang(call):\n chat = call.message.chat\n bot.edit_message_text( \"Выберите язык\",chat.id,call.message.message_id, reply_markup=langg())\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'rate')\ndef rat(call):\n msg = call.message\n bot.edit_message_text(\"Выберите источник актульного курса\", msg.chat.id, msg.message_id,\n reply_markup=markups.rate())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:5] == 'burse')\ndef burses(call):\n number_of_burse = call.data[5:]\n msg = call.message\n markup = telebot.types.InlineKeyboardMarkup()\n bt_back_to_rates = telebot.types.InlineKeyboardButton(text=\"Вернуться к выбору биржы\", callback_data='rate')\n markup.add(bt_back_to_rates)\n bot.edit_message_text(\"Для пары BTC/RUB теперь используются котировки биржи ...название...\", msg.chat.id,\n msg.message_id, reply_markup=markup)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'address')\ndef address_cur(call):\n msg = call.message\n bot.edit_message_text(\"Выберите валюту\", msg.chat.id, msg.message_id, reply_markup=markups.address())\n\n\n@bot.callback_query_handler(func=lambda call: call.data[:4] == 'adrs')\ndef address(call):\n msg = call.message\n mes = bot.edit_message_text(\"Введите адрес\", msg.chat.id, msg.message_id)\n bot.register_next_step_handler(mes, enter_address)\n\n\ndef enter_address(msg):\n new_address = msg\n bot.send_message(msg.chat.id, \"Информация сохранена\")\n\n\n@bot.message_handler(regexp=\"О сервисе\")\ndef service(msg):\n bot.send_message(msg.chat.id,\"Нужно 
придумать\")\n\n\nif __name__ == \"__main__\":\n bot.polling()\n # start_bot()\n",
"step-ids": [
19,
20,
30,
36,
37
]
}
|
[
19,
20,
30,
36,
37
] |
import urllib.request
class GetData:
key = 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'
url = "http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey=" + key
def main(self):
data = urllib.request.urlopen(self.url).read()
print(data)
f = open("sample.xml", "wb")
f.write(data)
f.close()
getData = GetData()
getData.main()
|
normal
|
{
"blob_id": "58ca520a2f43cef26a95de446f9c7a82819b0b66",
"index": 833,
"step-1": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\n<mask token>\ngetData.main()\n",
"step-3": "<mask token>\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\ngetData = GetData()\ngetData.main()\n",
"step-4": "import urllib.request\n\n\nclass GetData:\n key = (\n 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n )\n url = (\n 'http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey='\n + key)\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open('sample.xml', 'wb')\n f.write(data)\n f.close()\n\n\ngetData = GetData()\ngetData.main()\n",
"step-5": "import urllib.request\n\nclass GetData:\n key = 'fDs8VW%2BvtwQA8Q9LhBW%2BT2ETVBWWJaITjKfpzDsNJO8ugDsvdboInI16ZD295Txxtxwhc4G3PwMAvxd%2FWvz2gQ%3D%3D&pageNo=1&numOfRows=999'\n url = \"http://apis.data.go.kr/B552657/ErmctInfoInqireService/getEgytBassInfoInqire?serviceKey=\" + key\n\n def main(self):\n data = urllib.request.urlopen(self.url).read()\n print(data)\n f = open(\"sample.xml\", \"wb\")\n f.write(data)\n f.close()\n\ngetData = GetData()\ngetData.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from typing import List
class Solution:
def grayCode(self, n: int) ->List[int]:
res = [0] * 2 ** n
exp = 0
l = r = 1
for i in range(1, 2 ** n):
res[i] += res[r - i] + 2 ** exp
if i == r:
exp += 1
l = r + 1
r = l + 2 ** exp - 1
return res
|
normal
|
{
"blob_id": "dc600763b12edda05820721098e7e5bc80f74c89",
"index": 4798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def grayCode(self, n: int) ->List[int]:\n res = [0] * 2 ** n\n exp = 0\n l = r = 1\n for i in range(1, 2 ** n):\n res[i] += res[r - i] + 2 ** exp\n if i == r:\n exp += 1\n l = r + 1\n r = l + 2 ** exp - 1\n return res\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def grayCode(self, n: int) ->List[int]:\n res = [0] * 2 ** n\n exp = 0\n l = r = 1\n for i in range(1, 2 ** n):\n res[i] += res[r - i] + 2 ** exp\n if i == r:\n exp += 1\n l = r + 1\n r = l + 2 ** exp - 1\n return res\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import serial, time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.OUT)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(7, GPIO.OUT)
pwm1 = GPIO.PWM(23,100)
pwm2 = GPIO.PWM(24,100)
pwm1.start(100)
pwm2.start(100)
error_1=0
ui_1=0
#Arduino
Arduino=serial.Serial("/dev/ttyACM0",baudrate=9600,timeout=5)
Arduino.flushInput()
def separar(data):
if "distancia" in data:
label = data.split(":")
dist = float(label[1])
print "distancia US: " + str(dist)
global error_1, ui_1
kp=3
kd=0.2
ki=0.2
ref=15
Ts=0.020
error=(ref - dist)
up=(kp*error)
ui=ki*(error)+ui_1*Ts
ud=(kd/Ts)*(error- (error_1))
u=up+ui+ud
print "error:" + str(error)
print "velocidad" + str(u)
error_1=error
ui_1=ui
if u >100:
u=100
if u <-100:
u=-100
if u <0:
u=abs(u)
GPIO.output(4,False)
GPIO.output(27,False)
GPIO.output(17,True)
GPIO.output(22,True)
pwm1.ChangeDutyCycle(u)
pwm2.ChangeDutyCycle(u)
elif u >0:
u=abs(u)
GPIO.output(4,True)
GPIO.output(27,True)
GPIO.output(17,False)
GPIO.output(22,False)
pwm1.ChangeDutyCycle(u)
pwm2.ChangeDutyCycle(u)
else:
GPIO.output(4,False)
GPIO.output(27,False)
GPIO.output(17,True)
GPIO.output(22,True)
# pwm1.start(0)
# pwm2.start(0)
#-------------------------MAIN-----------------------------
if __name__ == "__main__":
print('Inicializando Sensor...')
while(True):
try:
data_Arduino=Arduino.readline()
separar(data_Arduino)
except KeyboardInterrupt:
print "Algo va mal :^("
break
|
normal
|
{
"blob_id": "647aa37c53aac7c620e5095c7a9368f4ad038608",
"index": 8209,
"step-1": "import serial, time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(4, GPIO.OUT)\nGPIO.setup(17, GPIO.OUT)\nGPIO.setup(27, GPIO.OUT)\nGPIO.setup(22, GPIO.OUT)\nGPIO.setup(23, GPIO.OUT)\nGPIO.setup(24, GPIO.OUT)\nGPIO.setup(7, GPIO.OUT)\n\npwm1 = GPIO.PWM(23,100)\npwm2 = GPIO.PWM(24,100)\npwm1.start(100)\npwm2.start(100)\n\nerror_1=0\nui_1=0\n#Arduino\nArduino=serial.Serial(\"/dev/ttyACM0\",baudrate=9600,timeout=5) \nArduino.flushInput()\n\ndef separar(data):\n if \"distancia\" in data:\n\tlabel = data.split(\":\")\n\tdist = float(label[1])\n\tprint \"distancia US: \" + str(dist)\n \tglobal error_1, ui_1\n\tkp=3\n\tkd=0.2\n\tki=0.2\n\tref=15\n\tTs=0.020\n\terror=(ref - dist)\n\tup=(kp*error)\n\tui=ki*(error)+ui_1*Ts\n\tud=(kd/Ts)*(error- (error_1))\n\tu=up+ui+ud\n\tprint \"error:\" + str(error)\n\tprint \"velocidad\" + str(u)\n\terror_1=error\n\tui_1=ui\t\n\tif u >100:\n\t\tu=100\n\tif u <-100:\n\t\tu=-100\n\tif u <0:\n\t\tu=abs(u)\n\t\tGPIO.output(4,False)\n\t\tGPIO.output(27,False)\n\t\tGPIO.output(17,True)\n\t\tGPIO.output(22,True)\n\t\tpwm1.ChangeDutyCycle(u)\n\t\tpwm2.ChangeDutyCycle(u)\n\telif u >0:\n\t\tu=abs(u)\n GPIO.output(4,True)\n GPIO.output(27,True)\n GPIO.output(17,False)\n GPIO.output(22,False)\n pwm1.ChangeDutyCycle(u)\n pwm2.ChangeDutyCycle(u)\n\telse:\n GPIO.output(4,False)\n GPIO.output(27,False)\n GPIO.output(17,True)\n GPIO.output(22,True)\n# pwm1.start(0)\n# pwm2.start(0) \n#-------------------------MAIN-----------------------------\nif __name__ == \"__main__\":\n print('Inicializando Sensor...')\n while(True):\n try:\n \tdata_Arduino=Arduino.readline()\n\t\tseparar(data_Arduino)\n except KeyboardInterrupt:\n print \"Algo va mal :^(\"\n break\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding: utf-8
# 02. 「パトカー」+「タクシー」=「パタトクカシーー」
# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.
s1 = "パトカー"
s2 = "タクシー"
ans = ""
for c1, c2 in zip(s1, s2):
ans += c1 + c2
print(ans)
#パタトクカシーー
|
normal
|
{
"blob_id": "4d7e30714ae209e1d09d895dadf7a19928fe253f",
"index": 6623,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-3": "s1 = 'パトカー'\ns2 = 'タクシー'\nans = ''\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\nprint(ans)\n",
"step-4": "# coding: utf-8\n\n# 02. 「パトカー」+「タクシー」=「パタトクカシーー」\n# 「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\n\ns1 = \"パトカー\"\ns2 = \"タクシー\"\n\nans = \"\"\nfor c1, c2 in zip(s1, s2):\n ans += c1 + c2\n\nprint(ans)\n#パタトクカシーー\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
pyautogui 屏幕像素获取、屏幕像素匹配
@author : zhouhuajian
@version : v1.0
"""
from pyautogui import pixel, pixelMatchesColor, screenshot
"""
主要内容:
1. pixel() - 获取屏幕像素;
2. pixelMatchesColor() - 屏幕像素匹配颜色。
"""
def test_pixel_pixelMatchesColor():
"""屏幕像素获取、屏幕像素匹配"""
# img = screenshot()
# print(img)
# print(img.getpixel((44, 107)))
# (149, 212, 234)
# print(pixel(44, 107))
# 根据上面返回值修改color
# print(pixelMatchesColor(44, 107, (149, 212, 234)))
# print(pixelMatchesColor(44, 107, (148, 212, 234)))
# color简单调整
print(pixelMatchesColor(44, 107, (148, 212, 234), tolerance=20))
print(pixelMatchesColor(44, 107, (100, 212, 234), tolerance=20))
# 看看小项目 重试、等待
test_pixel_pixelMatchesColor()
|
normal
|
{
"blob_id": "c15faf9df8fa2e1ad89ea2c922ab0551eaa69d3f",
"index": 1936,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_pixel_pixelMatchesColor():\n \"\"\"屏幕像素获取、屏幕像素匹配\"\"\"\n print(pixelMatchesColor(44, 107, (148, 212, 234), tolerance=20))\n print(pixelMatchesColor(44, 107, (100, 212, 234), tolerance=20))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_pixel_pixelMatchesColor():\n \"\"\"屏幕像素获取、屏幕像素匹配\"\"\"\n print(pixelMatchesColor(44, 107, (148, 212, 234), tolerance=20))\n print(pixelMatchesColor(44, 107, (100, 212, 234), tolerance=20))\n\n\ntest_pixel_pixelMatchesColor()\n",
"step-4": "<mask token>\nfrom pyautogui import pixel, pixelMatchesColor, screenshot\n<mask token>\n\n\ndef test_pixel_pixelMatchesColor():\n \"\"\"屏幕像素获取、屏幕像素匹配\"\"\"\n print(pixelMatchesColor(44, 107, (148, 212, 234), tolerance=20))\n print(pixelMatchesColor(44, 107, (100, 212, 234), tolerance=20))\n\n\ntest_pixel_pixelMatchesColor()\n",
"step-5": "\"\"\"\npyautogui 屏幕像素获取、屏幕像素匹配\n\n@author : zhouhuajian\n@version : v1.0\n\"\"\"\n\nfrom pyautogui import pixel, pixelMatchesColor, screenshot\n\n\"\"\"\n主要内容:\n1. pixel() - 获取屏幕像素;\n2. pixelMatchesColor() - 屏幕像素匹配颜色。\n\"\"\"\n\n\ndef test_pixel_pixelMatchesColor():\n \"\"\"屏幕像素获取、屏幕像素匹配\"\"\"\n # img = screenshot()\n # print(img)\n # print(img.getpixel((44, 107)))\n # (149, 212, 234)\n\n # print(pixel(44, 107))\n\n # 根据上面返回值修改color\n # print(pixelMatchesColor(44, 107, (149, 212, 234)))\n # print(pixelMatchesColor(44, 107, (148, 212, 234)))\n\n # color简单调整\n print(pixelMatchesColor(44, 107, (148, 212, 234), tolerance=20))\n print(pixelMatchesColor(44, 107, (100, 212, 234), tolerance=20))\n\n # 看看小项目 重试、等待\n\n\ntest_pixel_pixelMatchesColor()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
for _ in range(int(input())):
n = int(input())
s = input()
cur = 0
for i in s[::-1]:
if i==')':
cur+=1
else:
break
print("Yes") if cur>n//2 else print("No")
|
normal
|
{
"blob_id": "31b420adebbe0d3ee6da2ed8236ece1526bdb063",
"index": 6290,
"step-1": "<mask token>\n",
"step-2": "for _ in range(int(input())):\n n = int(input())\n s = input()\n cur = 0\n for i in s[::-1]:\n if i == ')':\n cur += 1\n else:\n break\n print('Yes') if cur > n // 2 else print('No')\n",
"step-3": "for _ in range(int(input())):\r\n n = int(input())\r\n s = input()\r\n cur = 0\r\n for i in s[::-1]:\r\n if i==')':\r\n cur+=1\r\n else:\r\n break\r\n print(\"Yes\") if cur>n//2 else print(\"No\")\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#This models.py is under UserRegApp1 folder
from django.db import models
# Create your models here.
class UserRegModel(models.Model):
username = models.CharField(max_length=15)
emailid = models.EmailField()
password1= models.CharField(max_length=6)
password2= models.CharField(max_length=6)
mailsent = models.CharField(max_length=1)
#def __str__(self):
# return self.title
|
normal
|
{
"blob_id": "d0653dac8e7c8162070ed9fd191f7fb318f47c60",
"index": 1719,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserRegModel(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass UserRegModel(models.Model):\n username = models.CharField(max_length=15)\n emailid = models.EmailField()\n password1 = models.CharField(max_length=6)\n password2 = models.CharField(max_length=6)\n mailsent = models.CharField(max_length=1)\n",
"step-4": "from django.db import models\n\n\nclass UserRegModel(models.Model):\n username = models.CharField(max_length=15)\n emailid = models.EmailField()\n password1 = models.CharField(max_length=6)\n password2 = models.CharField(max_length=6)\n mailsent = models.CharField(max_length=1)\n",
"step-5": "#This models.py is under UserRegApp1 folder\nfrom django.db import models\n\n# Create your models here.\n\nclass UserRegModel(models.Model):\n username = models.CharField(max_length=15)\n emailid = models.EmailField()\n password1= models.CharField(max_length=6)\n password2= models.CharField(max_length=6)\n mailsent = models.CharField(max_length=1)\n\n #def __str__(self):\n # return self.title\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import heapq
class KdNode:
"""
node of kdtree.
"""
def __init__(self, depth, splitting_feature, splitting_value, idx, parent):
"""
:param depth: depth of the node.
:param splitting_feature: split samples by which feature.
:param splitting_value: split samples by which feature value.
:param idx: indices of samples in the dataset.
:param parent: the parent node if it exists.
"""
self.depth = depth
self.splitting_feature = splitting_feature
self.splitting_value = splitting_value
self.idx = idx
self.parent = parent
# left and right children
self.left, self.right = None, None
class KdTree:
"""an efficient algorithm of find k-nearest-neighbours
https://en.wikipedia.org/wiki/K-d_tree
pseudo-code: (construct)
input: X, shape is (n_samples, n_features). dimension k
output: k-d tree
(1) start: divide all samples in X into two equal-sized collections by the median of the
first feature. Construct a root whose depth is 1. For samples equal to the median,
store them at the root. Store samples < median at the left child of the root,
and those > median at the right child.
(2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).
divide samples in the node by the median of the l-th feature. store samples equal to
the median at the node, and split other samples into left and right children on whether
they < median.
(3) terminate: terminate until no samples in left and right subtrees of the node.
pseudo-code: (search)
input: k-d tree, target sample x.
output: k nearest neighbours of x. (a list 'k-nn')
(1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller
than the splitting threshold (the median of 1st feature) of the root, move it to the left
child. else to the right child. go down recursively until reach a leaf. append samples of
the leaf to a list 'k-nn'.
(2) bottom-up: move to the parent of current node. If the max distance from x to samples in
'k-nn' is larger than the distance from x to the splitting threshold of the parent, search
for samples in the right subtree which is closer to x than some samples in 'k-nn'. If
successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples
if the size of 'k-nn' > k.
(3) terminate: terminate if reach the root and finish checking its right subtree.
"""
def __init__(self):
self.root = None
def create(self, X, dimensions=None):
"""
create a kd-tree on data X.
:param X: shape is (n_samples, n_features).
:param dimensions: the max number of features chosen for splitting samples. if None, set to
be n_features.
:return: None
"""
n_samples, n_features = X.shape
self.X = X
if not dimensions:
dimensions = n_features
self.root = KdNode(depth=0,
splitting_feature=0,
splitting_value=np.median(X[:, 0]),
idx=np.arange(n_samples),
parent=None)
# grow the tree by DFS
stack = [self.root]
while stack:
node = stack.pop()
# splitting samples in the node into two children
sample_values = X[node.idx, node.splitting_feature]
left_idx = node.idx[sample_values < node.splitting_value]
right_idx = node.idx[sample_values > node.splitting_value]
node.idx = node.idx[sample_values == node.splitting_value]
# since left and right subtrees are divided by the median of their parent,
# the sizes of the two subtrees are expected to be equal
assert len(left_idx) == len(right_idx),\
'left and right subtrees should have the same number of samples'
# append left and right children
if len(left_idx):
child_depth = node.depth + 1
child_feature = (node.depth + 1) % dimensions
left_value = np.median(X[left_idx, child_feature])
node.left = KdNode(depth=child_depth, splitting_feature=child_feature,
splitting_value=left_value, idx=left_idx, parent=node)
right_value = np.median(X[right_idx, child_feature])
node.right = KdNode(depth=child_depth, splitting_feature=child_feature,
splitting_value=right_value, idx=right_idx, parent=node)
stack.append(node.left)
stack.append(node.right)
def _search(self, x, k=3):
"""
:param x: the target sample point. shape is (n_features,)
:param k: the number of nearest neighbours to find.
:return: a list of k nearest neighbours.
"""
# top-down
cur_node = self.root
# kd-tree is actually a full binary tree
while cur_node.left:
if x[cur_node.splitting_feature] <= cur_node.splitting_value:
cur_node = cur_node.left
else:
cur_node = cur_node.right
# append samples in cur_node into k_nn. k_nn is a max heap
k_nn = []
# bottom-top
while cur_node:
for idx in cur_node.idx:
# Euclidean distance
dist = np.linalg.norm(self.X[idx] - x)
# negate the dist to construct a max heap
heapq.heappush(k_nn, (-dist, idx))
if abs(x[cur_node.splitting_feature] - cur_node.splitting_value) < -k_nn[0][0] or len(k_nn) < k:
# the max distance from x to samples in 'k-nn' > the distance from x to the splitting threshold
# check samples of another child
if x[cur_node.splitting_feature] <= cur_node.splitting_value:
checking_samples = self._samples_of_subtree(cur_node.right, x, k)
else:
checking_samples = self._samples_of_subtree(cur_node.left, x, k)
k_nn.extend(checking_samples)
heapq.heapify(k_nn)
# keep the size of k_nn <= k
while len(k_nn) > k:
heapq.heappop(k_nn)
cur_node = cur_node.parent
# sort k_nn
k_nn.sort(reverse=True)
dists, idxs = zip(*k_nn)
return [-d for d in dists], list(idxs)
def search(self, X, k=3):
"""
:param X: the target sample points. shape is (n_samples, n_features)
:param k: the number of nearest neighbours to find.
:return: lists of k nearest neighbours for each sample point.
"""
assert self.root, 'must create a tree before search'
result = [self._search(x, k) for x in X]
dists, idxs = zip(*result)
return np.array(dists), np.array(idxs)
def _samples_of_subtree(self, root, x, k):
# get k nearest neighbours from the subtree rooted at root
k_nn = []
def dfs(node):
if not node:
return
for idx in node.idx:
dist = np.linalg.norm(x - self.X[idx])
heapq.heappush(k_nn, (-dist, idx))
while len(k_nn) > k:
heapq.heappop(k_nn)
if len(k_nn) < k or \
(0 < len(k_nn) and abs(x[node.splitting_feature] - node.splitting_value) < -k_nn[0][0]):
# have to search both two children
dfs(node.left)
dfs(node.right)
else:
if x[node.splitting_feature] <= node.splitting_value:
dfs(node.left)
else:
dfs(node.right)
dfs(root)
return k_nn
if __name__ == '__main__':
from sklearn.neighbors import NearestNeighbors
n_samples, n_features = 2000, 10
n_test = 100
K = 5
X = np.random.random((n_samples, n_features))
test_X = np.random.random((n_test, n_features))
nbrs = NearestNeighbors(n_neighbors=K, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(test_X)
tree = KdTree()
tree.create(X)
dists, idxs = tree.search(test_X, k=K)
print(np.all(distances == dists))
print(np.all(indices == idxs))
|
normal
|
{
"blob_id": "2f16c74e51789dd06bfc1fe1c6173fa5b0ac38cd",
"index": 4747,
"step-1": "<mask token>\n\n\nclass KdTree:\n <mask token>\n\n def __init__(self):\n self.root = None\n\n def create(self, X, dimensions=None):\n \"\"\"\n create a kd-tree on data X.\n :param X: shape is (n_samples, n_features).\n :param dimensions: the max number of features chosen for splitting samples. if None, set to\n be n_features.\n :return: None\n \"\"\"\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n self.root = KdNode(depth=0, splitting_feature=0, splitting_value=np\n .median(X[:, 0]), idx=np.arange(n_samples), parent=None)\n stack = [self.root]\n while stack:\n node = stack.pop()\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n assert len(left_idx) == len(right_idx\n ), 'left and right subtrees should have the same number of samples'\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=left_value, idx=left_idx,\n parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=right_value, idx=\n right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)\n\n def _search(self, x, k=3):\n \"\"\"\n :param x: the target sample point. 
shape is (n_features,)\n :param k: the number of nearest neighbours to find.\n :return: a list of k nearest neighbours.\n \"\"\"\n cur_node = self.root\n while cur_node.left:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n k_nn = []\n while cur_node:\n for idx in cur_node.idx:\n dist = np.linalg.norm(self.X[idx] - x)\n heapq.heappush(k_nn, (-dist, idx))\n if abs(x[cur_node.splitting_feature] - cur_node.splitting_value\n ) < -k_nn[0][0] or len(k_nn) < k:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n checking_samples = self._samples_of_subtree(cur_node.\n right, x, k)\n else:\n checking_samples = self._samples_of_subtree(cur_node.\n left, x, k)\n k_nn.extend(checking_samples)\n heapq.heapify(k_nn)\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n cur_node = cur_node.parent\n k_nn.sort(reverse=True)\n dists, idxs = zip(*k_nn)\n return [(-d) for d in dists], list(idxs)\n\n def search(self, X, k=3):\n \"\"\"\n :param X: the target sample points. shape is (n_samples, n_features)\n :param k: the number of nearest neighbours to find.\n :return: lists of k nearest neighbours for each sample point.\n \"\"\"\n assert self.root, 'must create a tree before search'\n result = [self._search(x, k) for x in X]\n dists, idxs = zip(*result)\n return np.array(dists), np.array(idxs)\n\n def _samples_of_subtree(self, root, x, k):\n k_nn = []\n\n def dfs(node):\n if not node:\n return\n for idx in node.idx:\n dist = np.linalg.norm(x - self.X[idx])\n heapq.heappush(k_nn, (-dist, idx))\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n if len(k_nn) < k or 0 < len(k_nn) and abs(x[node.\n splitting_feature] - node.splitting_value) < -k_nn[0][0]:\n dfs(node.left)\n dfs(node.right)\n elif x[node.splitting_feature] <= node.splitting_value:\n dfs(node.left)\n else:\n dfs(node.right)\n dfs(root)\n return k_nn\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KdTree:\n \"\"\"an efficient algorithm of find k-nearest-neighbours\n https://en.wikipedia.org/wiki/K-d_tree\n\n pseudo-code: (construct)\n input: X, shape is (n_samples, n_features). dimension k\n output: k-d tree\n\n (1) start: divide all samples in X into two equal-sized collections by the median of the\n first feature. Construct a root whose depth is 1. For samples equal to the median,\n store them at the root. Store samples < median at the left child of the root,\n and those > median at the right child.\n (2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).\n divide samples in the node by the median of the l-th feature. store samples equal to\n the median at the node, and split other samples into left and right children on whether\n they < median.\n (3) terminate: terminate until no samples in left and right subtrees of the node.\n\n pseudo-code: (search)\n input: k-d tree, target sample x.\n output: k nearest neighbours of x. (a list 'k-nn')\n\n (1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller\n than the splitting threshold (the median of 1st feature) of the root, move it to the left\n child. else to the right child. go down recursively until reach a leaf. append samples of\n the leaf to a list 'k-nn'.\n (2) bottom-up: move to the parent of current node. If the max distance from x to samples in\n 'k-nn' is larger than the distance from x to the splitting threshold of the parent, search\n for samples in the right subtree which is closer to x than some samples in 'k-nn'. 
If\n successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples\n if the size of 'k-nn' > k.\n (3) terminate: terminate if reach the root and finish checking its right subtree.\n \"\"\"\n\n def __init__(self):\n self.root = None\n\n def create(self, X, dimensions=None):\n \"\"\"\n create a kd-tree on data X.\n :param X: shape is (n_samples, n_features).\n :param dimensions: the max number of features chosen for splitting samples. if None, set to\n be n_features.\n :return: None\n \"\"\"\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n self.root = KdNode(depth=0, splitting_feature=0, splitting_value=np\n .median(X[:, 0]), idx=np.arange(n_samples), parent=None)\n stack = [self.root]\n while stack:\n node = stack.pop()\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n assert len(left_idx) == len(right_idx\n ), 'left and right subtrees should have the same number of samples'\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=left_value, idx=left_idx,\n parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=right_value, idx=\n right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)\n\n def _search(self, x, k=3):\n \"\"\"\n :param x: the target sample point. 
shape is (n_features,)\n :param k: the number of nearest neighbours to find.\n :return: a list of k nearest neighbours.\n \"\"\"\n cur_node = self.root\n while cur_node.left:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n k_nn = []\n while cur_node:\n for idx in cur_node.idx:\n dist = np.linalg.norm(self.X[idx] - x)\n heapq.heappush(k_nn, (-dist, idx))\n if abs(x[cur_node.splitting_feature] - cur_node.splitting_value\n ) < -k_nn[0][0] or len(k_nn) < k:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n checking_samples = self._samples_of_subtree(cur_node.\n right, x, k)\n else:\n checking_samples = self._samples_of_subtree(cur_node.\n left, x, k)\n k_nn.extend(checking_samples)\n heapq.heapify(k_nn)\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n cur_node = cur_node.parent\n k_nn.sort(reverse=True)\n dists, idxs = zip(*k_nn)\n return [(-d) for d in dists], list(idxs)\n\n def search(self, X, k=3):\n \"\"\"\n :param X: the target sample points. shape is (n_samples, n_features)\n :param k: the number of nearest neighbours to find.\n :return: lists of k nearest neighbours for each sample point.\n \"\"\"\n assert self.root, 'must create a tree before search'\n result = [self._search(x, k) for x in X]\n dists, idxs = zip(*result)\n return np.array(dists), np.array(idxs)\n\n def _samples_of_subtree(self, root, x, k):\n k_nn = []\n\n def dfs(node):\n if not node:\n return\n for idx in node.idx:\n dist = np.linalg.norm(x - self.X[idx])\n heapq.heappush(k_nn, (-dist, idx))\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n if len(k_nn) < k or 0 < len(k_nn) and abs(x[node.\n splitting_feature] - node.splitting_value) < -k_nn[0][0]:\n dfs(node.left)\n dfs(node.right)\n elif x[node.splitting_feature] <= node.splitting_value:\n dfs(node.left)\n else:\n dfs(node.right)\n dfs(root)\n return k_nn\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass KdNode:\n \"\"\"\n node of kdtree.\n \"\"\"\n\n def __init__(self, depth, splitting_feature, splitting_value, idx, parent):\n \"\"\"\n :param depth: depth of the node.\n :param splitting_feature: split samples by which feature.\n :param splitting_value: split samples by which feature value.\n :param idx: indices of samples in the dataset.\n :param parent: the parent node if it exists.\n \"\"\"\n self.depth = depth\n self.splitting_feature = splitting_feature\n self.splitting_value = splitting_value\n self.idx = idx\n self.parent = parent\n self.left, self.right = None, None\n\n\nclass KdTree:\n \"\"\"an efficient algorithm of find k-nearest-neighbours\n https://en.wikipedia.org/wiki/K-d_tree\n\n pseudo-code: (construct)\n input: X, shape is (n_samples, n_features). dimension k\n output: k-d tree\n\n (1) start: divide all samples in X into two equal-sized collections by the median of the\n first feature. Construct a root whose depth is 1. For samples equal to the median,\n store them at the root. Store samples < median at the left child of the root,\n and those > median at the right child.\n (2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).\n divide samples in the node by the median of the l-th feature. store samples equal to\n the median at the node, and split other samples into left and right children on whether\n they < median.\n (3) terminate: terminate until no samples in left and right subtrees of the node.\n\n pseudo-code: (search)\n input: k-d tree, target sample x.\n output: k nearest neighbours of x. (a list 'k-nn')\n\n (1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller\n than the splitting threshold (the median of 1st feature) of the root, move it to the left\n child. else to the right child. go down recursively until reach a leaf. append samples of\n the leaf to a list 'k-nn'.\n (2) bottom-up: move to the parent of current node. 
If the max distance from x to samples in\n 'k-nn' is larger than the distance from x to the splitting threshold of the parent, search\n for samples in the right subtree which is closer to x than some samples in 'k-nn'. If\n successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples\n if the size of 'k-nn' > k.\n (3) terminate: terminate if reach the root and finish checking its right subtree.\n \"\"\"\n\n def __init__(self):\n self.root = None\n\n def create(self, X, dimensions=None):\n \"\"\"\n create a kd-tree on data X.\n :param X: shape is (n_samples, n_features).\n :param dimensions: the max number of features chosen for splitting samples. if None, set to\n be n_features.\n :return: None\n \"\"\"\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n self.root = KdNode(depth=0, splitting_feature=0, splitting_value=np\n .median(X[:, 0]), idx=np.arange(n_samples), parent=None)\n stack = [self.root]\n while stack:\n node = stack.pop()\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n assert len(left_idx) == len(right_idx\n ), 'left and right subtrees should have the same number of samples'\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=left_value, idx=left_idx,\n parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=right_value, idx=\n right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)\n\n def _search(self, x, k=3):\n \"\"\"\n :param x: the target sample point. 
shape is (n_features,)\n :param k: the number of nearest neighbours to find.\n :return: a list of k nearest neighbours.\n \"\"\"\n cur_node = self.root\n while cur_node.left:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n k_nn = []\n while cur_node:\n for idx in cur_node.idx:\n dist = np.linalg.norm(self.X[idx] - x)\n heapq.heappush(k_nn, (-dist, idx))\n if abs(x[cur_node.splitting_feature] - cur_node.splitting_value\n ) < -k_nn[0][0] or len(k_nn) < k:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n checking_samples = self._samples_of_subtree(cur_node.\n right, x, k)\n else:\n checking_samples = self._samples_of_subtree(cur_node.\n left, x, k)\n k_nn.extend(checking_samples)\n heapq.heapify(k_nn)\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n cur_node = cur_node.parent\n k_nn.sort(reverse=True)\n dists, idxs = zip(*k_nn)\n return [(-d) for d in dists], list(idxs)\n\n def search(self, X, k=3):\n \"\"\"\n :param X: the target sample points. shape is (n_samples, n_features)\n :param k: the number of nearest neighbours to find.\n :return: lists of k nearest neighbours for each sample point.\n \"\"\"\n assert self.root, 'must create a tree before search'\n result = [self._search(x, k) for x in X]\n dists, idxs = zip(*result)\n return np.array(dists), np.array(idxs)\n\n def _samples_of_subtree(self, root, x, k):\n k_nn = []\n\n def dfs(node):\n if not node:\n return\n for idx in node.idx:\n dist = np.linalg.norm(x - self.X[idx])\n heapq.heappush(k_nn, (-dist, idx))\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n if len(k_nn) < k or 0 < len(k_nn) and abs(x[node.\n splitting_feature] - node.splitting_value) < -k_nn[0][0]:\n dfs(node.left)\n dfs(node.right)\n elif x[node.splitting_feature] <= node.splitting_value:\n dfs(node.left)\n else:\n dfs(node.right)\n dfs(root)\n return k_nn\n\n\n<mask token>\n",
"step-4": "import numpy as np\nimport heapq\n\n\nclass KdNode:\n \"\"\"\n node of kdtree.\n \"\"\"\n\n def __init__(self, depth, splitting_feature, splitting_value, idx, parent):\n \"\"\"\n :param depth: depth of the node.\n :param splitting_feature: split samples by which feature.\n :param splitting_value: split samples by which feature value.\n :param idx: indices of samples in the dataset.\n :param parent: the parent node if it exists.\n \"\"\"\n self.depth = depth\n self.splitting_feature = splitting_feature\n self.splitting_value = splitting_value\n self.idx = idx\n self.parent = parent\n self.left, self.right = None, None\n\n\nclass KdTree:\n \"\"\"an efficient algorithm of find k-nearest-neighbours\n https://en.wikipedia.org/wiki/K-d_tree\n\n pseudo-code: (construct)\n input: X, shape is (n_samples, n_features). dimension k\n output: k-d tree\n\n (1) start: divide all samples in X into two equal-sized collections by the median of the\n first feature. Construct a root whose depth is 1. For samples equal to the median,\n store them at the root. Store samples < median at the left child of the root,\n and those > median at the right child.\n (2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).\n divide samples in the node by the median of the l-th feature. store samples equal to\n the median at the node, and split other samples into left and right children on whether\n they < median.\n (3) terminate: terminate until no samples in left and right subtrees of the node.\n\n pseudo-code: (search)\n input: k-d tree, target sample x.\n output: k nearest neighbours of x. (a list 'k-nn')\n\n (1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller\n than the splitting threshold (the median of 1st feature) of the root, move it to the left\n child. else to the right child. go down recursively until reach a leaf. 
append samples of\n the leaf to a list 'k-nn'.\n (2) bottom-up: move to the parent of current node. If the max distance from x to samples in\n 'k-nn' is larger than the distance from x to the splitting threshold of the parent, search\n for samples in the right subtree which is closer to x than some samples in 'k-nn'. If\n successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples\n if the size of 'k-nn' > k.\n (3) terminate: terminate if reach the root and finish checking its right subtree.\n \"\"\"\n\n def __init__(self):\n self.root = None\n\n def create(self, X, dimensions=None):\n \"\"\"\n create a kd-tree on data X.\n :param X: shape is (n_samples, n_features).\n :param dimensions: the max number of features chosen for splitting samples. if None, set to\n be n_features.\n :return: None\n \"\"\"\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n self.root = KdNode(depth=0, splitting_feature=0, splitting_value=np\n .median(X[:, 0]), idx=np.arange(n_samples), parent=None)\n stack = [self.root]\n while stack:\n node = stack.pop()\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n assert len(left_idx) == len(right_idx\n ), 'left and right subtrees should have the same number of samples'\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=left_value, idx=left_idx,\n parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=\n child_feature, splitting_value=right_value, idx=\n right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)\n\n 
def _search(self, x, k=3):\n \"\"\"\n :param x: the target sample point. shape is (n_features,)\n :param k: the number of nearest neighbours to find.\n :return: a list of k nearest neighbours.\n \"\"\"\n cur_node = self.root\n while cur_node.left:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n k_nn = []\n while cur_node:\n for idx in cur_node.idx:\n dist = np.linalg.norm(self.X[idx] - x)\n heapq.heappush(k_nn, (-dist, idx))\n if abs(x[cur_node.splitting_feature] - cur_node.splitting_value\n ) < -k_nn[0][0] or len(k_nn) < k:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n checking_samples = self._samples_of_subtree(cur_node.\n right, x, k)\n else:\n checking_samples = self._samples_of_subtree(cur_node.\n left, x, k)\n k_nn.extend(checking_samples)\n heapq.heapify(k_nn)\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n cur_node = cur_node.parent\n k_nn.sort(reverse=True)\n dists, idxs = zip(*k_nn)\n return [(-d) for d in dists], list(idxs)\n\n def search(self, X, k=3):\n \"\"\"\n :param X: the target sample points. 
shape is (n_samples, n_features)\n :param k: the number of nearest neighbours to find.\n :return: lists of k nearest neighbours for each sample point.\n \"\"\"\n assert self.root, 'must create a tree before search'\n result = [self._search(x, k) for x in X]\n dists, idxs = zip(*result)\n return np.array(dists), np.array(idxs)\n\n def _samples_of_subtree(self, root, x, k):\n k_nn = []\n\n def dfs(node):\n if not node:\n return\n for idx in node.idx:\n dist = np.linalg.norm(x - self.X[idx])\n heapq.heappush(k_nn, (-dist, idx))\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n if len(k_nn) < k or 0 < len(k_nn) and abs(x[node.\n splitting_feature] - node.splitting_value) < -k_nn[0][0]:\n dfs(node.left)\n dfs(node.right)\n elif x[node.splitting_feature] <= node.splitting_value:\n dfs(node.left)\n else:\n dfs(node.right)\n dfs(root)\n return k_nn\n\n\nif __name__ == '__main__':\n from sklearn.neighbors import NearestNeighbors\n n_samples, n_features = 2000, 10\n n_test = 100\n K = 5\n X = np.random.random((n_samples, n_features))\n test_X = np.random.random((n_test, n_features))\n nbrs = NearestNeighbors(n_neighbors=K, algorithm='ball_tree').fit(X)\n distances, indices = nbrs.kneighbors(test_X)\n tree = KdTree()\n tree.create(X)\n dists, idxs = tree.search(test_X, k=K)\n print(np.all(distances == dists))\n print(np.all(indices == idxs))\n",
"step-5": "import numpy as np\nimport heapq\n\n\nclass KdNode:\n \"\"\"\n node of kdtree.\n \"\"\"\n def __init__(self, depth, splitting_feature, splitting_value, idx, parent):\n \"\"\"\n :param depth: depth of the node.\n :param splitting_feature: split samples by which feature.\n :param splitting_value: split samples by which feature value.\n :param idx: indices of samples in the dataset.\n :param parent: the parent node if it exists.\n \"\"\"\n self.depth = depth\n self.splitting_feature = splitting_feature\n self.splitting_value = splitting_value\n self.idx = idx\n self.parent = parent\n # left and right children\n self.left, self.right = None, None\n\n\nclass KdTree:\n \"\"\"an efficient algorithm of find k-nearest-neighbours\n https://en.wikipedia.org/wiki/K-d_tree\n\n pseudo-code: (construct)\n input: X, shape is (n_samples, n_features). dimension k\n output: k-d tree\n\n (1) start: divide all samples in X into two equal-sized collections by the median of the\n first feature. Construct a root whose depth is 1. For samples equal to the median,\n store them at the root. Store samples < median at the left child of the root,\n and those > median at the right child.\n (2) repeat: for nodes of depth j, select the l-th feature as splitting axis. l = j(mod k).\n divide samples in the node by the median of the l-th feature. store samples equal to\n the median at the node, and split other samples into left and right children on whether\n they < median.\n (3) terminate: terminate until no samples in left and right subtrees of the node.\n\n pseudo-code: (search)\n input: k-d tree, target sample x.\n output: k nearest neighbours of x. (a list 'k-nn')\n\n (1) top-down: starting from the root. if the feature value of the splitting axis of x is smaller\n than the splitting threshold (the median of 1st feature) of the root, move it to the left\n child. else to the right child. go down recursively until reach a leaf. 
append samples of\n the leaf to a list 'k-nn'.\n (2) bottom-up: move to the parent of current node. If the max distance from x to samples in\n 'k-nn' is larger than the distance from x to the splitting threshold of the parent, search\n for samples in the right subtree which is closer to x than some samples in 'k-nn'. If\n successfully find some, replace those 'furthest' samples in 'k-nn' with closer samples\n if the size of 'k-nn' > k.\n (3) terminate: terminate if reach the root and finish checking its right subtree.\n \"\"\"\n def __init__(self):\n self.root = None\n\n def create(self, X, dimensions=None):\n \"\"\"\n create a kd-tree on data X.\n :param X: shape is (n_samples, n_features).\n :param dimensions: the max number of features chosen for splitting samples. if None, set to\n be n_features.\n :return: None\n \"\"\"\n n_samples, n_features = X.shape\n self.X = X\n if not dimensions:\n dimensions = n_features\n\n self.root = KdNode(depth=0,\n splitting_feature=0,\n splitting_value=np.median(X[:, 0]),\n idx=np.arange(n_samples),\n parent=None)\n # grow the tree by DFS\n stack = [self.root]\n while stack:\n node = stack.pop()\n # splitting samples in the node into two children\n sample_values = X[node.idx, node.splitting_feature]\n left_idx = node.idx[sample_values < node.splitting_value]\n right_idx = node.idx[sample_values > node.splitting_value]\n node.idx = node.idx[sample_values == node.splitting_value]\n # since left and right subtrees are divided by the median of their parent,\n # the sizes of the two subtrees are expected to be equal\n assert len(left_idx) == len(right_idx),\\\n 'left and right subtrees should have the same number of samples'\n # append left and right children\n if len(left_idx):\n child_depth = node.depth + 1\n child_feature = (node.depth + 1) % dimensions\n left_value = np.median(X[left_idx, child_feature])\n node.left = KdNode(depth=child_depth, splitting_feature=child_feature,\n splitting_value=left_value, idx=left_idx, 
parent=node)\n right_value = np.median(X[right_idx, child_feature])\n node.right = KdNode(depth=child_depth, splitting_feature=child_feature,\n splitting_value=right_value, idx=right_idx, parent=node)\n stack.append(node.left)\n stack.append(node.right)\n\n def _search(self, x, k=3):\n \"\"\"\n :param x: the target sample point. shape is (n_features,)\n :param k: the number of nearest neighbours to find.\n :return: a list of k nearest neighbours.\n \"\"\"\n # top-down\n cur_node = self.root\n # kd-tree is actually a full binary tree\n while cur_node.left:\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n cur_node = cur_node.left\n else:\n cur_node = cur_node.right\n # append samples in cur_node into k_nn. k_nn is a max heap\n k_nn = []\n # bottom-top\n while cur_node:\n for idx in cur_node.idx:\n # Euclidean distance\n dist = np.linalg.norm(self.X[idx] - x)\n # negate the dist to construct a max heap\n heapq.heappush(k_nn, (-dist, idx))\n if abs(x[cur_node.splitting_feature] - cur_node.splitting_value) < -k_nn[0][0] or len(k_nn) < k:\n # the max distance from x to samples in 'k-nn' > the distance from x to the splitting threshold\n # check samples of another child\n if x[cur_node.splitting_feature] <= cur_node.splitting_value:\n checking_samples = self._samples_of_subtree(cur_node.right, x, k)\n else:\n checking_samples = self._samples_of_subtree(cur_node.left, x, k)\n k_nn.extend(checking_samples)\n heapq.heapify(k_nn)\n # keep the size of k_nn <= k\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n cur_node = cur_node.parent\n # sort k_nn\n k_nn.sort(reverse=True)\n dists, idxs = zip(*k_nn)\n return [-d for d in dists], list(idxs)\n\n def search(self, X, k=3):\n \"\"\"\n :param X: the target sample points. 
shape is (n_samples, n_features)\n :param k: the number of nearest neighbours to find.\n :return: lists of k nearest neighbours for each sample point.\n \"\"\"\n assert self.root, 'must create a tree before search'\n\n result = [self._search(x, k) for x in X]\n dists, idxs = zip(*result)\n return np.array(dists), np.array(idxs)\n\n def _samples_of_subtree(self, root, x, k):\n # get k nearest neighbours from the subtree rooted at root\n k_nn = []\n\n def dfs(node):\n if not node:\n return\n for idx in node.idx:\n dist = np.linalg.norm(x - self.X[idx])\n heapq.heappush(k_nn, (-dist, idx))\n while len(k_nn) > k:\n heapq.heappop(k_nn)\n if len(k_nn) < k or \\\n (0 < len(k_nn) and abs(x[node.splitting_feature] - node.splitting_value) < -k_nn[0][0]):\n # have to search both two children\n dfs(node.left)\n dfs(node.right)\n else:\n if x[node.splitting_feature] <= node.splitting_value:\n dfs(node.left)\n else:\n dfs(node.right)\n\n dfs(root)\n return k_nn\n\n\nif __name__ == '__main__':\n from sklearn.neighbors import NearestNeighbors\n n_samples, n_features = 2000, 10\n n_test = 100\n K = 5\n X = np.random.random((n_samples, n_features))\n test_X = np.random.random((n_test, n_features))\n nbrs = NearestNeighbors(n_neighbors=K, algorithm='ball_tree').fit(X)\n distances, indices = nbrs.kneighbors(test_X)\n tree = KdTree()\n tree.create(X)\n dists, idxs = tree.search(test_X, k=K)\n print(np.all(distances == dists))\n print(np.all(indices == idxs))\n",
"step-ids": [
6,
7,
10,
12,
13
]
}
|
[
6,
7,
10,
12,
13
] |
# -*- coding: utf-8 -*-
# @Time : 2019/9/17 17:48
# @Author : ZhangYang
# @Email : ian.zhang.88@outlook.com
from functools import wraps
def create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):
    """Create a sequential child znode under *base_path* and return its path.

    :param zk_client: connected Kazoo-style client exposing ``exists``,
        ``ensure_path`` and ``create``.
    :param base_path: parent znode path; created first when it is absent.
    :param prefix: child name prefix; the server appends the sequence counter.
    :param is_ephemeral: when True the node disappears with the session.
    :return: full path of the newly created node (as returned by ``create``).
    """
    if not zk_client.exists(base_path):
        zk_client.ensure_path(base_path)
    node_path = '{}/{}'.format(base_path, prefix)
    # Empty payload; ZooKeeper assigns the monotonically increasing suffix.
    return zk_client.create(node_path, b'', sequence=True, ephemeral=is_ephemeral)
class SetGetMixin:
    """Decorator factories guarding ZooKeeper-backed accessor methods.

    ``get``/``set`` receive the *name* of an instance attribute that holds a
    znode path, and wrap a method so the path is checked (``get``) or created
    (``set``) before the call. The decorated class must expose a
    ``zk_client`` attribute.
    """

    def get(path_variable):
        """Return a decorator that short-circuits to ``None`` when the znode
        named by ``self.<path_variable>`` does not exist yet."""
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                target = getattr(self, path_variable)
                if self.zk_client.exists(target):
                    return func(self, *args, **kwargs)
                return None
            return wrapper
        return decorator

    def set(path_variable):
        """Return a decorator that ensures the znode named by
        ``self.<path_variable>`` exists before running the method."""
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                target = getattr(self, path_variable)
                self.zk_client.ensure_path(target)
                return func(self, *args, **kwargs)
            return wrapper
        return decorator
|
normal
|
{
"blob_id": "f9a0c3b643c2ee6bb6778477bf8fc21564812081",
"index": 3373,
"step-1": "<mask token>\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-3": "<mask token>\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'\n ), sequence=True, ephemeral=is_ephemeral)\n return new_node\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-4": "from functools import wraps\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n new_node = zk_client.create(base_path + '/' + prefix, ''.encode('utf-8'\n ), sequence=True, ephemeral=is_ephemeral)\n return new_node\n\n\nclass SetGetMixin:\n\n def get(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def set(path_variable):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Time : 2019/9/17 17:48\n# @Author : ZhangYang\n# @Email : ian.zhang.88@outlook.com\nfrom functools import wraps\n\n\ndef create_new_sequence_node(zk_client, base_path, prefix, is_ephemeral=False):\n if not zk_client.exists(base_path):\n zk_client.ensure_path(base_path)\n\n new_node = zk_client.create( base_path+'/'+prefix, ''.encode('utf-8'), sequence=True, ephemeral=is_ephemeral )\n return new_node\n\nclass SetGetMixin():\n def get(path_variable):\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n if not self.zk_client.exists(getattr(self, path_variable)):\n return None\n return func(self, *args, **kwargs)\n\n return wrapper\n return decorator\n\n def set(path_variable):\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n self.zk_client.ensure_path(getattr(self, path_variable))\n return func(self, *args, **kwargs)\n return wrapper\n return decorator",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding=utf-8
from smallinvoice.commons import BaseJsonEncodableObject, BaseService
class Catalog(BaseJsonEncodableObject):
    """A catalog entry (product or service) for the smallinvoice API."""

    def __init__(self, catalog_type, unit, name, cost_per_unit, vat=0):
        """Store the entry's fields.

        :param catalog_type: kind of entry; exposed as the ``type`` attribute.
        :param unit: unit the position is billed in.
        :param name: human-readable label of the entry.
        :param cost_per_unit: price per unit.
        :param vat: VAT percentage, defaults to 0.
        """
        self.name = name
        self.unit = unit
        self.type = catalog_type
        self.vat = vat
        self.cost_per_unit = cost_per_unit
class CatalogService(BaseService):
name = 'catalog'
|
normal
|
{
"blob_id": "37feeba8ff682e5998fde4bcba8c37043cb593f2",
"index": 5195,
"step-1": "<mask token>\n\n\nclass CatalogService(BaseService):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CatalogService(BaseService):\n name = 'catalog'\n",
"step-3": "<mask token>\n\n\nclass Catalog(BaseJsonEncodableObject):\n <mask token>\n\n\nclass CatalogService(BaseService):\n name = 'catalog'\n",
"step-4": "from smallinvoice.commons import BaseJsonEncodableObject, BaseService\n\n\nclass Catalog(BaseJsonEncodableObject):\n\n def __init__(self, catalog_type, unit, name, cost_per_unit, vat=0):\n self.type = catalog_type\n self.unit = unit\n self.name = name\n self.cost_per_unit = cost_per_unit\n self.vat = vat\n\n\nclass CatalogService(BaseService):\n name = 'catalog'\n",
"step-5": "# coding=utf-8\nfrom smallinvoice.commons import BaseJsonEncodableObject, BaseService\n\n\nclass Catalog(BaseJsonEncodableObject):\n def __init__(self, catalog_type, unit, name, cost_per_unit, vat=0):\n self.type = catalog_type\n self.unit = unit\n self.name = name\n self.cost_per_unit = cost_per_unit\n self.vat = vat\n\n\nclass CatalogService(BaseService):\n name = 'catalog'\n\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
# common methods to delete data from list
fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']
#pop method
# fruits.pop(1)
# del
# del fruits[1]
# remove
# fruits.remove('banana')
# append, extend, insert
# pop, remove, del
print(fruits)
|
normal
|
{
"blob_id": "a245cb1f232b152edf40b6399686c6811c522d99",
"index": 6458,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(fruits)\n",
"step-3": "fruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']\nprint(fruits)\n",
"step-4": "# common methods to delete data from list\r\nfruits = ['orange', ' apple', 'pear', 'banana', 'kiwi']\r\n#pop method\r\n# fruits.pop(1)\r\n\r\n\r\n# del\r\n# del fruits[1]\r\n\r\n# remove\r\n\r\n# fruits.remove('banana')\r\n\r\n# append, extend, insert\r\n# pop, remove, del\r\n\r\nprint(fruits)\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render
# Create your views here.
def test_petite_vue(request):
return render(request, 'petite_vue_app/test-form.html')
|
normal
|
{
"blob_id": "709f2425bc6e0b0b650fd6c657df6d85cfbd05fe",
"index": 84,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n",
"step-3": "from django.shortcuts import render\n\n\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n",
"step-4": "from django.shortcuts import render\n\n# Create your views here.\ndef test_petite_vue(request):\n return render(request, 'petite_vue_app/test-form.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.