code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
from django.urls import path, re_path
from .views import *
app_name = 'articles'
urlpatterns = [
path('',articles_list,name='list'),
path('create', create_article, name='create'),
path('<slug:slug>', article_detail,name='detail'),
]
|
normal
|
{
"blob_id": "1c222f42c5c0178f97391f1bdc60bba110f3d118",
"index": 9866,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'articles'\nurlpatterns = [path('', articles_list, name='list'), path('create',\n create_article, name='create'), path('<slug:slug>', article_detail,\n name='detail')]\n",
"step-3": "from django.urls import path, re_path\nfrom .views import *\napp_name = 'articles'\nurlpatterns = [path('', articles_list, name='list'), path('create',\n create_article, name='create'), path('<slug:slug>', article_detail,\n name='detail')]\n",
"step-4": "from django.urls import path, re_path\nfrom .views import *\n\napp_name = 'articles'\n\nurlpatterns = [\n path('',articles_list,name='list'),\n path('create', create_article, name='create'),\n path('<slug:slug>', article_detail,name='detail'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from datetime import date, timedelta, datetime
from pytz import timezone
import store
import psycopg2
import requests
import os
import filters
FIRST = 4
def prepareDate():
pc_tz = timezone('US/Pacific')
n = datetime.now(pc_tz)
nd = n.date()
store.updateStore(today=nd)
def getData():
today = store.mapStore("today")
npdata = store.mapStore("data")
filedate = np.datetime64(today - timedelta(days=2))
try:
url = 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'.format(filedate)
download = requests.get(url)
download.encoding = "utf-8"
temp_file = open("temp/temp.csv", 'w', encoding='utf8')
temp_file.writelines(download.text)
npcsv = np.genfromtxt("temp/temp.csv", delimiter=',', dtype=np.str, encoding='utf8', invalid_raise=False, missing_values = np.nan, filling_values=np.nan)
temp_file.close()
store.updateStore(data=npcsv)
except Exception as e:
exceptions = store.mapStore("exceptions")
exceptions.append(e)
print("Not possible to read csv file .")
print(e)
def getDates():
dates = store.mapStore("dates")
data = store.mapStore("data")
exceptions = store.mapStore("exceptions")
if(len(exceptions) > 0):
return False
try:
d0 = date(2020, 1, 13)
d1 = data[0,FIRST:]
i = 0
newdates = []
while i <= d1.shape[0] - 1:
diffday = np.datetime64(d0 + timedelta(days=i))
newdates.append(diffday)
i += 1
newdates = np.concatenate((dates, newdates))
store.updateStore(dates=newdates)
except Exception as e:
exceptions = store.mapStore("exceptions")
exceptions.append(e)
print("Problems with handling data numpy array")
print(e)
return True
def addDataToDB(conn, filterData):
data = store.mapStore("data")
dates = store.mapStore("dates")
exceptions = store.mapStore("exceptions")
if(len(exceptions) > 0):
return False
dataValues = data[1:,FIRST:]
datesValues = dates
if(filterData is not None):
datesValues = datesValues[filterData]
dataValues = dataValues[:,filterData]
sql = "INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)"
for ix,iy in np.ndindex(dataValues.shape):
try:
date = datesValues[iy].astype(datetime)
values = data[ix+1, :FIRST]
values = tuple(values.tolist())
item = dataValues[ix, iy].item()
try:
item = float(item)
except:
item = None
values = values + tuple([date, item])
cursor = conn.cursor()
cursor.execute(sql, values)
conn.commit()
cursor.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
exceptions = store.mapStore("exceptions")
exceptions.append(error)
def addPercentileMessageToDB():
data = store.mapStore("data")
states_walking = filters.filterStates(data[1:, :])
states_driving = filters.filterStates(data[1:, :], "driving")
states_transit = filters.filterStates(data[1:, :], "transit")
underq1, overq1, percentile_walking_25 = filters.filterPercentiles(states_walking, 25)
undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(states_walking, 50)
underq3, overq3, percentile_walking_75 = filters.filterPercentiles(states_walking, 75)
underq1_driving, overq1_driving, percentile_driving_25 = filters.filterPercentiles(states_driving, 25)
undermedian_driving, overmedian_driving, percentile_driving_50 = filters.filterPercentiles(states_driving, 50)
underq3_driving, overq3_driving, percentile_driving_75 = filters.filterPercentiles(states_driving, 75)
underq1_transit, overq1_transit, percentile_transit_25 = filters.filterPercentiles(states_transit, 25)
undermedian_transit, overmedian_transit, percentile_transit_50 = filters.filterPercentiles(states_transit, 50)
underq3_transit, overq3_transit, percentile_transit_75 = filters.filterPercentiles(states_transit, 75)
over100_waling = filters.filerOver100(states_walking)
underq1_states = states_walking[underq1,1]
overq3_states = states_walking[overq3,1]
over100_states = states_walking[over100_waling, 1]
over100_driving = filters.filerOver100(states_driving)
underq1_states_driving = states_driving[underq1_driving,1]
overq3_states_driving = states_driving[overq3_driving,1]
over100_states_driving = states_driving[over100_driving, 1]
over100_transit = filters.filerOver100(states_transit)
underq1_states_transit = states_transit[underq1_transit,1]
overq3_states_transit = states_transit[overq3_transit,1]
over100_states_transit = states_transit[over100_transit, 1]
print("walking under 25 percentile (far to normal) " + percentile_walking_25.astype(np.str))
print(underq1_states)
print("walking over 75 percentile (over normal trnasportation) " + percentile_walking_75.astype(np.str))
print(overq3_states)
print("walking over 100 in comparison to 13.1.2020")
print(over100_states)
print("Median value is " + percentile_walking_50.astype(np.str))
print(" ")
print("Driving under 25 percentile (far to normal) " + percentile_driving_25.astype(np.str))
print(underq1_states_driving)
print("Driving over 75 percentile (over normal trnasportation) ", percentile_driving_75.astype(np.str))
print(overq3_states_driving)
print("Driving over 100% in comparison to 13.1.2020")
print(over100_states_driving)
print("Median value is " + percentile_driving_50.astype(np.str))
print(" ")
print("Transit under 25 percentile (far to normal) " + percentile_transit_25.astype(np.str))
print(underq1_states_transit)
print("Transit over 75 percentile (over normal trnasportation) ", percentile_transit_75.astype(np.str))
print(overq3_states_transit.astype(np.str))
print("Transit over 100 in comparison to 13.1.2020")
print(over100_states_transit)
print("Median value is " + percentile_transit_50.astype(np.str))
print(" ")
|
normal
|
{
"blob_id": "5b4651f37cdcbb13f8ddd03327ef65af0f9cf61d",
"index": 1944,
"step-1": "<mask token>\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 
25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 
percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-2": "<mask token>\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n 
try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n 
overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-3": "<mask token>\nFIRST = 4\n\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, 
region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = 
states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-4": "import numpy as np\nfrom datetime import date, timedelta, datetime\nfrom pytz import timezone\nimport store\nimport psycopg2\nimport requests\nimport os\nimport filters\nFIRST = 4\n\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\n\ndef getData():\n today = store.mapStore('today')\n npdata = store.mapStore('data')\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = (\n 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'\n .format(filedate))\n download = requests.get(url)\n download.encoding = 'utf-8'\n temp_file = open('temp/temp.csv', 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt('temp/temp.csv', delimiter=',', dtype=np.str,\n encoding='utf8', invalid_raise=False, missing_values=np.nan,\n filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Not possible to read csv file .')\n print(e)\n\n\ndef getDates():\n dates = store.mapStore('dates')\n data = store.mapStore('data')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0, FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore('exceptions')\n exceptions.append(e)\n print('Problems with handling data numpy array')\n print(e)\n return True\n\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore('data')\n dates = store.mapStore('dates')\n exceptions = store.mapStore('exceptions')\n if len(exceptions) > 0:\n return False\n dataValues = data[1:, FIRST:]\n datesValues = dates\n if 
filterData is not None:\n datesValues = datesValues[filterData]\n dataValues = dataValues[:, filterData]\n sql = (\n 'INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)'\n )\n for ix, iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix + 1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore('exceptions')\n exceptions.append(error)\n\n\ndef addPercentileMessageToDB():\n data = store.mapStore('data')\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], 'driving')\n states_transit = filters.filterStates(data[1:, :], 'transit')\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(\n states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(\n states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(\n states_walking, 75)\n underq1_driving, overq1_driving, percentile_driving_25 = (filters.\n filterPercentiles(states_driving, 25))\n undermedian_driving, overmedian_driving, percentile_driving_50 = (filters\n .filterPercentiles(states_driving, 50))\n underq3_driving, overq3_driving, percentile_driving_75 = (filters.\n filterPercentiles(states_driving, 75))\n underq1_transit, overq1_transit, percentile_transit_25 = (filters.\n filterPercentiles(states_transit, 25))\n undermedian_transit, overmedian_transit, percentile_transit_50 = (filters\n .filterPercentiles(states_transit, 50))\n underq3_transit, overq3_transit, percentile_transit_75 = (filters.\n filterPercentiles(states_transit, 75))\n over100_waling = 
filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1, 1]\n overq3_states = states_walking[overq3, 1]\n over100_states = states_walking[over100_waling, 1]\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving, 1]\n overq3_states_driving = states_driving[overq3_driving, 1]\n over100_states_driving = states_driving[over100_driving, 1]\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit, 1]\n overq3_states_transit = states_transit[overq3_transit, 1]\n over100_states_transit = states_transit[over100_transit, 1]\n print('walking under 25 percentile (far to normal) ' +\n percentile_walking_25.astype(np.str))\n print(underq1_states)\n print('walking over 75 percentile (over normal trnasportation) ' +\n percentile_walking_75.astype(np.str))\n print(overq3_states)\n print('walking over 100 in comparison to 13.1.2020')\n print(over100_states)\n print('Median value is ' + percentile_walking_50.astype(np.str))\n print(' ')\n print('Driving under 25 percentile (far to normal) ' +\n percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print('Driving over 75 percentile (over normal trnasportation) ',\n percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print('Driving over 100% in comparison to 13.1.2020')\n print(over100_states_driving)\n print('Median value is ' + percentile_driving_50.astype(np.str))\n print(' ')\n print('Transit under 25 percentile (far to normal) ' +\n percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print('Transit over 75 percentile (over normal trnasportation) ',\n percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print('Transit over 100 in comparison to 13.1.2020')\n print(over100_states_transit)\n print('Median value is ' + percentile_transit_50.astype(np.str))\n print(' ')\n",
"step-5": "import numpy as np\nfrom datetime import date, timedelta, datetime\nfrom pytz import timezone\nimport store\nimport psycopg2\nimport requests\nimport os\nimport filters\n\nFIRST = 4\n\ndef prepareDate():\n pc_tz = timezone('US/Pacific')\n n = datetime.now(pc_tz)\n nd = n.date()\n store.updateStore(today=nd)\n\ndef getData():\n today = store.mapStore(\"today\")\n npdata = store.mapStore(\"data\")\n filedate = np.datetime64(today - timedelta(days=2))\n try:\n url = 'https://covid19-static.cdn-apple.com/covid19-mobility-data/2007HotfixDev49/v2/en-us/applemobilitytrends-{}.csv'.format(filedate)\n download = requests.get(url)\n download.encoding = \"utf-8\"\n temp_file = open(\"temp/temp.csv\", 'w', encoding='utf8')\n temp_file.writelines(download.text)\n npcsv = np.genfromtxt(\"temp/temp.csv\", delimiter=',', dtype=np.str, encoding='utf8', invalid_raise=False, missing_values = np.nan, filling_values=np.nan)\n temp_file.close()\n store.updateStore(data=npcsv)\n except Exception as e:\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(e)\n print(\"Not possible to read csv file .\")\n print(e)\n\ndef getDates():\n dates = store.mapStore(\"dates\")\n data = store.mapStore(\"data\")\n exceptions = store.mapStore(\"exceptions\")\n if(len(exceptions) > 0):\n return False\n try:\n d0 = date(2020, 1, 13)\n d1 = data[0,FIRST:]\n i = 0\n newdates = []\n while i <= d1.shape[0] - 1:\n diffday = np.datetime64(d0 + timedelta(days=i))\n newdates.append(diffday)\n i += 1\n newdates = np.concatenate((dates, newdates))\n store.updateStore(dates=newdates)\n except Exception as e:\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(e)\n print(\"Problems with handling data numpy array\")\n print(e)\n return True\n\ndef addDataToDB(conn, filterData):\n data = store.mapStore(\"data\")\n dates = store.mapStore(\"dates\")\n exceptions = store.mapStore(\"exceptions\")\n if(len(exceptions) > 0):\n return False\n dataValues = data[1:,FIRST:]\n datesValues = 
dates\n if(filterData is not None):\n datesValues = datesValues[filterData]\n dataValues = dataValues[:,filterData]\n sql = \"INSERT INTO apple_transport(geo_type, region, transportation_type, alternative_name, date, value) VALUES(%s, %s, %s, %s, %s, %s)\"\n for ix,iy in np.ndindex(dataValues.shape):\n try:\n date = datesValues[iy].astype(datetime)\n values = data[ix+1, :FIRST]\n values = tuple(values.tolist())\n item = dataValues[ix, iy].item()\n try:\n item = float(item)\n except:\n item = None\n values = values + tuple([date, item])\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n cursor.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n exceptions = store.mapStore(\"exceptions\")\n exceptions.append(error)\n\ndef addPercentileMessageToDB():\n data = store.mapStore(\"data\")\n states_walking = filters.filterStates(data[1:, :])\n states_driving = filters.filterStates(data[1:, :], \"driving\")\n states_transit = filters.filterStates(data[1:, :], \"transit\")\n underq1, overq1, percentile_walking_25 = filters.filterPercentiles(states_walking, 25)\n undermedian, overmedian, percentile_walking_50 = filters.filterPercentiles(states_walking, 50)\n underq3, overq3, percentile_walking_75 = filters.filterPercentiles(states_walking, 75)\n\n underq1_driving, overq1_driving, percentile_driving_25 = filters.filterPercentiles(states_driving, 25)\n undermedian_driving, overmedian_driving, percentile_driving_50 = filters.filterPercentiles(states_driving, 50)\n underq3_driving, overq3_driving, percentile_driving_75 = filters.filterPercentiles(states_driving, 75)\n\n underq1_transit, overq1_transit, percentile_transit_25 = filters.filterPercentiles(states_transit, 25)\n undermedian_transit, overmedian_transit, percentile_transit_50 = filters.filterPercentiles(states_transit, 50)\n underq3_transit, overq3_transit, percentile_transit_75 = filters.filterPercentiles(states_transit, 75)\n\n over100_waling = 
filters.filerOver100(states_walking)\n underq1_states = states_walking[underq1,1]\n overq3_states = states_walking[overq3,1]\n over100_states = states_walking[over100_waling, 1]\n\n over100_driving = filters.filerOver100(states_driving)\n underq1_states_driving = states_driving[underq1_driving,1]\n overq3_states_driving = states_driving[overq3_driving,1]\n over100_states_driving = states_driving[over100_driving, 1]\n\n over100_transit = filters.filerOver100(states_transit)\n underq1_states_transit = states_transit[underq1_transit,1]\n overq3_states_transit = states_transit[overq3_transit,1]\n over100_states_transit = states_transit[over100_transit, 1]\n print(\"walking under 25 percentile (far to normal) \" + percentile_walking_25.astype(np.str))\n print(underq1_states)\n print(\"walking over 75 percentile (over normal trnasportation) \" + percentile_walking_75.astype(np.str))\n print(overq3_states)\n print(\"walking over 100 in comparison to 13.1.2020\")\n print(over100_states)\n print(\"Median value is \" + percentile_walking_50.astype(np.str))\n print(\" \")\n\n print(\"Driving under 25 percentile (far to normal) \" + percentile_driving_25.astype(np.str))\n print(underq1_states_driving)\n print(\"Driving over 75 percentile (over normal trnasportation) \", percentile_driving_75.astype(np.str))\n print(overq3_states_driving)\n print(\"Driving over 100% in comparison to 13.1.2020\")\n print(over100_states_driving)\n print(\"Median value is \" + percentile_driving_50.astype(np.str))\n print(\" \")\n\n print(\"Transit under 25 percentile (far to normal) \" + percentile_transit_25.astype(np.str))\n print(underq1_states_transit)\n print(\"Transit over 75 percentile (over normal trnasportation) \", percentile_transit_75.astype(np.str))\n print(overq3_states_transit.astype(np.str))\n print(\"Transit over 100 in comparison to 13.1.2020\")\n print(over100_states_transit)\n print(\"Median value is \" + percentile_transit_50.astype(np.str))\n print(\" \")\n\n\n \n\n \n \n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
x, y = [float(x) for x in raw_input().split(" ")]
print(x*y)
|
normal
|
{
"blob_id": "1ed7fb0dd5f0fa5e60c855eceaaf3259092918ef",
"index": 1240,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(x * y)\n",
"step-3": "x, y = [float(x) for x in raw_input().split(' ')]\nprint(x * y)\n",
"step-4": "x, y = [float(x) for x in raw_input().split(\" \")]\nprint(x*y)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from db_upgrader.Repositories.store import Store, StoreException
from db_upgrader.Models.product import *
class ProductStore(Store):
table = 'product'
def add_product(self, product):
try:
c = self.conn.cursor()
c.execute(
'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'
, (product.name, product.customerId, product.is_enable))
return c.lastrowid
except Exception as e:
raise StoreException('error storing product: {}'.format(e))
|
normal
|
{
"blob_id": "963499e071873083dc942486b9a5b094393cd99e",
"index": 4458,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductStore(Store):\n <mask token>\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-3": "<mask token>\n\n\nclass ProductStore(Store):\n table = 'product'\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-4": "from db_upgrader.Repositories.store import Store, StoreException\nfrom db_upgrader.Models.product import *\n\n\nclass ProductStore(Store):\n table = 'product'\n\n def add_product(self, product):\n try:\n c = self.conn.cursor()\n c.execute(\n 'INSERT INTO product (`name`,customerId,is_enable) VALUES(%s,%s,%s)'\n , (product.name, product.customerId, product.is_enable))\n return c.lastrowid\n except Exception as e:\n raise StoreException('error storing product: {}'.format(e))\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve():
a = int(input())
b = int(input())
return math.sqrt(a * a + b * b)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solve():
a = int(input())
b = int(input())
return math.sqrt(a * a + b * b)
print(solve())
<|reserved_special_token_1|>
import math
def solve():
a = int(input())
b = int(input())
return math.sqrt(a * a + b * b)
print(solve())
|
flexible
|
{
"blob_id": "a22d38f7e8122d6339d1beab3bf08fa41c36d61d",
"index": 9648,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\nprint(solve())\n",
"step-4": "import math\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\nprint(solve())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
'''
This module is used for handling the button.
'''
import RPi.GPIO as GPIO
from aiy.voicehat import *
class Button:
status = bool() #status indicates whether it is supposed to be on or off.
LED_pin = 25 #Pin for the LED in the button in the Google AIY kit.
button_pin = 23#The button is handled through the Google AIY lib because that one might actually work.
def __init__(self):
self.status = True
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.LED_pin , GPIO.OUT)
self.status = get_button(self.button_pin)
def read_button(self):
self.status = GPIO.input(self.button_pin)
#Turns on the button light as prompted
def light(self, stat):
if (stat):
GPIO.output(self.LED_pin, 1)
else:
GPIO.output(self.LED_pin, 0)
def cleanup(self):
GPIO.cleanup()
|
normal
|
{
"blob_id": "878937e19d6a48a0d44309efbac1d41c208ce849",
"index": 6195,
"step-1": "<mask token>\n\n\nclass Button:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def read_button(self):\n self.status = GPIO.input(self.button_pin)\n\n def light(self, stat):\n if stat:\n GPIO.output(self.LED_pin, 1)\n else:\n GPIO.output(self.LED_pin, 0)\n\n def cleanup(self):\n GPIO.cleanup()\n",
"step-2": "<mask token>\n\n\nclass Button:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n self.status = True\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.LED_pin, GPIO.OUT)\n self.status = get_button(self.button_pin)\n\n def read_button(self):\n self.status = GPIO.input(self.button_pin)\n\n def light(self, stat):\n if stat:\n GPIO.output(self.LED_pin, 1)\n else:\n GPIO.output(self.LED_pin, 0)\n\n def cleanup(self):\n GPIO.cleanup()\n",
"step-3": "<mask token>\n\n\nclass Button:\n status = bool()\n LED_pin = 25\n button_pin = 23\n\n def __init__(self):\n self.status = True\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.LED_pin, GPIO.OUT)\n self.status = get_button(self.button_pin)\n\n def read_button(self):\n self.status = GPIO.input(self.button_pin)\n\n def light(self, stat):\n if stat:\n GPIO.output(self.LED_pin, 1)\n else:\n GPIO.output(self.LED_pin, 0)\n\n def cleanup(self):\n GPIO.cleanup()\n",
"step-4": "<mask token>\nimport RPi.GPIO as GPIO\nfrom aiy.voicehat import *\n\n\nclass Button:\n status = bool()\n LED_pin = 25\n button_pin = 23\n\n def __init__(self):\n self.status = True\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.LED_pin, GPIO.OUT)\n self.status = get_button(self.button_pin)\n\n def read_button(self):\n self.status = GPIO.input(self.button_pin)\n\n def light(self, stat):\n if stat:\n GPIO.output(self.LED_pin, 1)\n else:\n GPIO.output(self.LED_pin, 0)\n\n def cleanup(self):\n GPIO.cleanup()\n",
"step-5": "'''\nThis module is used for handling the button. \n'''\nimport RPi.GPIO as GPIO\nfrom aiy.voicehat import *\n\nclass Button:\n status = bool() #status indicates whether it is supposed to be on or off. \n LED_pin = 25 #Pin for the LED in the button in the Google AIY kit. \n button_pin = 23#The button is handled through the Google AIY lib because that one might actually work. \n \n def __init__(self):\n self.status = True\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.LED_pin , GPIO.OUT)\n self.status = get_button(self.button_pin)\n \n def read_button(self):\n self.status = GPIO.input(self.button_pin)\n \n #Turns on the button light as prompted\n def light(self, stat):\n if (stat):\n GPIO.output(self.LED_pin, 1)\n else:\n GPIO.output(self.LED_pin, 0)\n \n def cleanup(self):\n GPIO.cleanup()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sns.catplot(x='romantic', y='absences', data=student_data, kind='point',
hue='school', ci=None, estimator=median)
plt.show()
<|reserved_special_token_1|>
import numpy as np
from numpy import median
sns.catplot(x='romantic', y='absences', data=student_data, kind='point',
hue='school', ci=None, estimator=median)
plt.show()
<|reserved_special_token_1|>
# 5/1/2020
# Import median function from numpy
import numpy as np
from numpy import median
# Plot the median number of absences instead of the mean
sns.catplot(x="romantic", y="absences",
data=student_data,
kind="point",
hue="school",
ci=None,
estimator = median)
# Show plot
plt.show()
|
flexible
|
{
"blob_id": "11072601e31ceba13f8adf6c070f84ca5add35e9",
"index": 3300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.catplot(x='romantic', y='absences', data=student_data, kind='point',\n hue='school', ci=None, estimator=median)\nplt.show()\n",
"step-3": "import numpy as np\nfrom numpy import median\nsns.catplot(x='romantic', y='absences', data=student_data, kind='point',\n hue='school', ci=None, estimator=median)\nplt.show()\n",
"step-4": "# 5/1/2020\n# Import median function from numpy\nimport numpy as np\nfrom numpy import median\n\n# Plot the median number of absences instead of the mean\nsns.catplot(x=\"romantic\", y=\"absences\",\n\t\t\tdata=student_data,\n kind=\"point\",\n hue=\"school\",\n ci=None,\n estimator = median)\n\n# Show plot\nplt.show()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Bank:
<|reserved_special_token_0|>
def __init__(self):
self.Bank_name = 'State Bank of India'
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
class CustomerDetails(Bank):
check_amt = 18
def __init__(self, name, identity, acc, op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return (
f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
)
def deposite(self, credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f"You've added {self.credit} : Total Amount = {self.op_amount}")
return Bank.amount
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return (
f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
)
else:
return f'{self.name} your Balance : {self.op_amount}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bank:
amount = 0
def __init__(self):
self.Bank_name = 'State Bank of India'
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
class CustomerDetails(Bank):
check_amt = 18
def __init__(self, name, identity, acc, op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return (
f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
)
def deposite(self, credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f"You've added {self.credit} : Total Amount = {self.op_amount}")
return Bank.amount
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return (
f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
)
else:
return f'{self.name} your Balance : {self.op_amount}'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate(a):
return a
data = set()
class Bank:
amount = 0
def __init__(self):
self.Bank_name = 'State Bank of India'
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
class CustomerDetails(Bank):
check_amt = 18
def __init__(self, name, identity, acc, op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return (
f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
)
def deposite(self, credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f"You've added {self.credit} : Total Amount = {self.op_amount}")
return Bank.amount
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return (
f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
)
else:
return f'{self.name} your Balance : {self.op_amount}'
cus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)
print(cus2)
cus2.deposite(20000)
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2)
<|reserved_special_token_1|>
import datetime
import time
def calculate(a):
return a
data = set()
class Bank:
amount = 0
def __init__(self):
self.Bank_name = 'State Bank of India'
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
class CustomerDetails(Bank):
check_amt = 18
def __init__(self, name, identity, acc, op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return (
f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
)
def deposite(self, credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f"You've added {self.credit} : Total Amount = {self.op_amount}")
return Bank.amount
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return (
f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
)
else:
return f'{self.name} your Balance : {self.op_amount}'
cus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)
print(cus2)
cus2.deposite(20000)
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2)
<|reserved_special_token_1|>
import datetime
import time
def calculate(a):
return a
data = set()
class Bank:
amount = 0
def __init__(self):
self.Bank_name = "State Bank of India"
self.ifsc = 'SBI0N00012'
def __repr__(self):
return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
# self.stored = datetime.date.today()
class CustomerDetails(Bank):
check_amt = 18
def __init__(self,name,identity,acc,op_amount):
Bank.__init__(self)
self.name = name
self.identity = identity
self.acc = acc
self.op_amount = op_amount
Bank.amount += self.op_amount
self.count = 0
def __repr__(self):
return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '
# stored = datetime.datetime.today()
# def __repr__(self)
def deposite(self,credit):
self.credit = credit
self.op_amount += self.credit
Bank.amount += self.op_amount
print(f'You\'ve added {self.credit} : Total Amount = {self.op_amount}')
return (Bank.amount)
def check_balance(self):
self.count += 1
if self.count > 3:
self.op_amount -= CustomerDetails.check_amt
return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
else:
return f'{self.name} your Balance : {self.op_amount}'
# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)
# print(cus1)
cus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)
print(cus2)
cus2.deposite(20000)
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2)
# print(cus2.check_balance())
|
flexible
|
{
"blob_id": "66ae7f4ee01ca5516d8e3dc447eeb4709e2b6aec",
"index": 4615,
"step-1": "<mask token>\n\n\nclass Bank:\n <mask token>\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n",
"step-4": "import datetime\nimport time\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n",
"step-5": "import datetime\nimport time\n\ndef calculate(a):\n return a\n\n\ndata = set()\nclass Bank:\n amount = 0\n def __init__(self):\n self.Bank_name = \"State Bank of India\"\n self.ifsc = 'SBI0N00012'\n \n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n # self.stored = datetime.date.today()\n\nclass CustomerDetails(Bank):\n check_amt = 18\n def __init__(self,name,identity,acc,op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n\n # stored = datetime.datetime.today()\n # def __repr__(self)\n def deposite(self,credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f'You\\'ve added {self.credit} : Total Amount = {self.op_amount}')\n return (Bank.amount)\n \n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n \n \n\n# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)\n# print(cus1)\ncus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n# print(cus2.check_balance())\n\n\n ",
"step-ids": [
9,
10,
13,
14,
15
]
}
|
[
9,
10,
13,
14,
15
] |
import json
import math
import rospy
import sys
import RPi.GPIO as GPIO
from std_msgs.msg import Float32
from geometry_msgs.msg import Point32
from time import sleep
#pulse width of difference rotations
d_45 = 1.0
d_90 = 1.5
d_180 = 2.5
frequency = 50.0
t_per_cycle = (1.0 / frequency) * 1000.0
#convert to duty cycles
duty_45 = (d_45 / t_per_cycle) * 100.0
duty_90 = (d_90 / t_per_cycle) * 100.0
duty_180 = (d_180 / t_per_cycle) * 100.0
#gear spec
radius = 2.25
cir = 2.0 * radius * math.pi
d = cir / 20.0
cm_theta = 18.0 / d
z_radius = 1.0
z_cir = 2.0 * z_radius * math.pi
z_d = z_cir / 10.0
z_cm_theta = 36.0 / d
class Servo_node:
def __init__(self):
rospy.init_node('servo_node', anonymous=False)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Setting up for pin 12. Make sure to adjust for your own needs
motor_x = 13
motor_y = 12
motor_z = 20
GPIO.setup(motor_x, GPIO.OUT)
GPIO.setup(motor_y, GPIO.OUT)
GPIO.setup(motor_z, GPIO.OUT)
# 0.75-2.75
self.pwm_x = GPIO.PWM(motor_x, frequency)
# 2-3
self.pwm_y = GPIO.PWM(motor_y, frequency)
# 0.8-1.8
self.pwm_z = GPIO.PWM(motor_z, frequency)
#set start position to (0,0)
self.pwm_z.start(duty_45)
sleep(0.5)
self.pwm_z.ChangeDutyCycle(0)
self.pwm_x.start(duty_180)
sleep(0.5)
self.pwm_x.ChangeDutyCycle(0)
self.pwm_y.start(duty_45)
sleep(0.5)
self.pwm_y.ChangeDutyCycle(0)
#topic takes angle as message
self.sub_x = rospy.Subscriber("/servo_ctrl/s1", Float32, self.set_servo_x_angle)
self.sub_y = rospy.Subscriber("/servo_ctrl/s2", Float32, self.set_servo_y_angle)
self.sub_z = rospy.Subscriber("/servo_ctrl/s3", Float32, self.set_servo_z_angle)
#topic for position commands
self.pos_sub = rospy.Subscriber("/servo_ctrl/pos", Point32, self.set_coordinate)
def set_servo_x_angle(self, msg):
rospy.loginfo("setting servo")
self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))# Note tha this does not correspond to angle
sleep(1)
self.pwm_x.ChangeDutyCycle(0)
sleep(0.5)
def set_servo_y_angle(self, msg):
rospy.loginfo("setting servo")
self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle
sleep(1)
self.pwm_y.ChangeDutyCycle(0)
sleep(0.5)
def set_servo_z_angle(self, msg):
rospy.loginfo("setting servo")
self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle
sleep(1)
self.pwm_z.ChangeDutyCycle(0)
sleep(0.5)
def set_coordinate(self, msg):
#conversion between coordinate to motor angles
rospy.loginfo("setting position")
#correction for motors
#offset added to make sure the touch probe is at (0,0) initially
#may need to change depends on your motor
x_offset = 0
y_offset = -5
z_offset = 0
x = msg.x
y = msg.y
z = msg.z
z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)
x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)
y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)
x_angle = 180 - x * cm_theta + x_offset
y_angle = 45 + y * cm_theta + y_offset
z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset
if x == -1 or y == -1 or z == -1:
if x == -1:
self.pwm_x.ChangeDutyCycle(0)
else:
x_pub.publish(Float32(x_angle))
if y == -1:
self.pwm_y.ChangeDutyCycle(0)
else:
y_pub.publish(Float32(y_angle))
if z == -1:
self.pwm_z.ChangeDutyCycle(0)
else:
z_pub.publish(Float32(z_angle))
elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:
# z_pub.publish(Float32(45))
x_pub.publish(Float32(x_angle))
y_pub.publish(Float32(y_angle))
z_pub.publish(Float32(z_angle))
def saturate_input(self, angle):
#conversion from angle to duty cycles
print(angle)
pw_per_deg = (duty_180 - duty_90) / 90;
duty = pw_per_deg * (angle - 45) + duty_45
print(duty)
return max(min(duty,100),0)
def main_loop():
    """Keep the node alive, sleeping at 10 Hz until rospy signals shutdown."""
    loop_rate = rospy.Rate(10)  # Hz
    while not rospy.is_shutdown():
        loop_rate.sleep()
if __name__ == "__main__":
servo = Servo_node()
main_loop()
|
normal
|
{
"blob_id": "95845aeb47e0d2c579739767ece35f4134564d98",
"index": 7717,
"step-1": "<mask token>\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + 
y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + 
y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\n<mask token>\n",
"step-3": "<mask token>\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\nfrequency = 50.0\nt_per_cycle = 1.0 / frequency * 1000.0\nduty_45 = d_45 / t_per_cycle * 100.0\nduty_90 = d_90 / t_per_cycle * 100.0\nduty_180 = d_180 / t_per_cycle * 100.0\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n 
rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\nif __name__ == '__main__':\n servo = Servo_node()\n main_loop()\n",
"step-4": "import json\nimport math\nimport rospy\nimport sys\nimport RPi.GPIO as GPIO\nfrom std_msgs.msg import Float32\nfrom geometry_msgs.msg import Point32\nfrom time import sleep\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\nfrequency = 50.0\nt_per_cycle = 1.0 / frequency * 1000.0\nduty_45 = d_45 / t_per_cycle * 100.0\nduty_90 = d_90 / t_per_cycle * 100.0\nduty_180 = d_180 / t_per_cycle * 100.0\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\nclass Servo_node:\n\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n motor_x = 13\n motor_y = 12\n motor_z = 20\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n self.sub_x = rospy.Subscriber('/servo_ctrl/s1', Float32, self.\n set_servo_x_angle)\n self.sub_y = rospy.Subscriber('/servo_ctrl/s2', Float32, self.\n set_servo_y_angle)\n self.sub_z = rospy.Subscriber('/servo_ctrl/s3', Float32, self.\n set_servo_z_angle)\n self.pos_sub = rospy.Subscriber('/servo_ctrl/pos', Point32, self.\n set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo('setting servo')\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo('setting 
servo')\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data))\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_coordinate(self, msg):\n rospy.loginfo('setting position')\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty, 100), 0)\n\n\ndef main_loop():\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n rate.sleep()\n\n\nif __name__ == '__main__':\n servo = Servo_node()\n main_loop()\n",
"step-5": "import json\nimport math\nimport rospy\nimport sys\nimport RPi.GPIO as GPIO\nfrom std_msgs.msg import Float32\nfrom geometry_msgs.msg import Point32\nfrom time import sleep\n\n#pulse width of difference rotations\nd_45 = 1.0\nd_90 = 1.5\nd_180 = 2.5\n\nfrequency = 50.0\nt_per_cycle = (1.0 / frequency) * 1000.0\n\n#convert to duty cycles\nduty_45 = (d_45 / t_per_cycle) * 100.0\nduty_90 = (d_90 / t_per_cycle) * 100.0\nduty_180 = (d_180 / t_per_cycle) * 100.0\n\n#gear spec\nradius = 2.25\ncir = 2.0 * radius * math.pi\nd = cir / 20.0\ncm_theta = 18.0 / d\n\nz_radius = 1.0\nz_cir = 2.0 * z_radius * math.pi\nz_d = z_cir / 10.0\nz_cm_theta = 36.0 / d\n\n\n\nclass Servo_node:\n def __init__(self):\n rospy.init_node('servo_node', anonymous=False)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n # Setting up for pin 12. Make sure to adjust for your own needs\n motor_x = 13\n motor_y = 12\n motor_z = 20\n\n GPIO.setup(motor_x, GPIO.OUT)\n GPIO.setup(motor_y, GPIO.OUT)\n GPIO.setup(motor_z, GPIO.OUT)\n # 0.75-2.75\n self.pwm_x = GPIO.PWM(motor_x, frequency)\n # 2-3\n self.pwm_y = GPIO.PWM(motor_y, frequency)\n # 0.8-1.8\n self.pwm_z = GPIO.PWM(motor_z, frequency)\n #set start position to (0,0)\n self.pwm_z.start(duty_45)\n sleep(0.5)\n self.pwm_z.ChangeDutyCycle(0)\n\n self.pwm_x.start(duty_180)\n sleep(0.5)\n self.pwm_x.ChangeDutyCycle(0)\n\n self.pwm_y.start(duty_45)\n sleep(0.5)\n self.pwm_y.ChangeDutyCycle(0)\n #topic takes angle as message\n self.sub_x = rospy.Subscriber(\"/servo_ctrl/s1\", Float32, self.set_servo_x_angle)\n self.sub_y = rospy.Subscriber(\"/servo_ctrl/s2\", Float32, self.set_servo_y_angle)\n self.sub_z = rospy.Subscriber(\"/servo_ctrl/s3\", Float32, self.set_servo_z_angle)\n #topic for position commands\n self.pos_sub = rospy.Subscriber(\"/servo_ctrl/pos\", Point32, self.set_coordinate)\n\n def set_servo_x_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_x.ChangeDutyCycle(self.saturate_input(msg.data))# Note tha this does not 
correspond to angle\n sleep(1)\n self.pwm_x.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_y_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_y.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle\n sleep(1)\n self.pwm_y.ChangeDutyCycle(0)\n sleep(0.5)\n\n def set_servo_z_angle(self, msg):\n rospy.loginfo(\"setting servo\")\n self.pwm_z.ChangeDutyCycle(self.saturate_input(msg.data)) # Note tha this does not correspond to angle\n sleep(1)\n self.pwm_z.ChangeDutyCycle(0)\n sleep(0.5)\n def set_coordinate(self, msg):\n #conversion between coordinate to motor angles\n rospy.loginfo(\"setting position\")\n #correction for motors\n #offset added to make sure the touch probe is at (0,0) initially\n #may need to change depends on your motor\n x_offset = 0\n y_offset = -5\n z_offset = 0\n x = msg.x\n y = msg.y\n z = msg.z\n z_pub = rospy.Publisher('servo_ctrl/s3', Float32, queue_size=10)\n x_pub = rospy.Publisher('servo_ctrl/s1', Float32, queue_size=10)\n y_pub = rospy.Publisher('servo_ctrl/s2', Float32, queue_size=10)\n x_angle = 180 - x * cm_theta + x_offset\n y_angle = 45 + y * cm_theta + y_offset\n z_angle = 45 + (1.5 - z) * z_cm_theta + z_offset\n\n if x == -1 or y == -1 or z == -1:\n if x == -1:\n self.pwm_x.ChangeDutyCycle(0)\n else:\n x_pub.publish(Float32(x_angle))\n if y == -1:\n self.pwm_y.ChangeDutyCycle(0)\n else:\n y_pub.publish(Float32(y_angle))\n if z == -1:\n self.pwm_z.ChangeDutyCycle(0)\n else:\n z_pub.publish(Float32(z_angle))\n elif x >= 0 and x <= 2.5 and y >= 0 and y <= 4:\n # z_pub.publish(Float32(45))\n x_pub.publish(Float32(x_angle))\n y_pub.publish(Float32(y_angle))\n z_pub.publish(Float32(z_angle))\n\n def saturate_input(self, angle):\n #conversion from angle to duty cycles\n print(angle)\n pw_per_deg = (duty_180 - duty_90) / 90;\n duty = pw_per_deg * (angle - 45) + duty_45\n print(duty)\n return max(min(duty,100),0)\n\n\ndef main_loop():\n rate = rospy.Rate(10) # 10Hz\n while not 
rospy.is_shutdown():\n rate.sleep()\n\nif __name__ == \"__main__\":\n servo = Servo_node()\n main_loop()\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
# Given an array of integers, return indices of the two numbers such that they add up to a specific target.
# You may assume that each input would have exactly one solution, and you may not use the same element twice.
# Example:
# Given nums = [2, 7, 11, 15], target = 9,
# Because nums[0] + nums[1] = 2 + 7 = 9,
# return [0, 1].
class Solution:
    def twoSum(self, nums, target):
        """Return the pair of indices whose values add up to *target*.

        Falls back to a message string when no pair exists (the problem
        statement guarantees one, so that path is defensive only).
        """
        # Map each value to its (last) index for O(1) complement lookups.
        index_of = {value: i for i, value in enumerate(nums)}
        for i, value in enumerate(nums):
            complement = target - value
            # The second check stops an element from pairing with itself,
            # e.g. a single occurrence of target/2.
            if complement in index_of and index_of[complement] != i:
                return (i, index_of[complement])
        return ("No solution available")
# initialize a test case (the duplicate 7s exercise the self-pair guard)
s = Solution()
nums = [7,2,7,15]
target = 14
a = s.twoSum(nums,target)
print(a)
# create a dictionary that stores the indices as the keys and the integers as the values
# iterate through the array, attempting to find the target minus the integer as a key in the dictionary
# return the indice of the integer and the value of the key
# watch out for arrays that involve duplicates, such as [3,3,7,2], target 6
|
normal
|
{
"blob_id": "16cc85324b555f0cfec8d577b776b86872578822",
"index": 6016,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\n<mask token>\nprint(a)\n",
"step-4": "class Solution:\n\n def twoSum(self, nums, target):\n d = dict([(nums[i], i) for i in range(len(nums))])\n for n in range(len(nums)):\n dif = target - nums[n]\n if dif in d and d[dif] != n:\n return n, d[dif]\n return 'No solution available'\n\n\ns = Solution()\nnums = [7, 2, 7, 15]\ntarget = 14\na = s.twoSum(nums, target)\nprint(a)\n",
"step-5": "# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n\n# Example:\n# Given nums = [2, 7, 11, 15], target = 9,\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\nclass Solution:\n def twoSum(self, nums, target):\n # create a dictionary using the values of the array as the dictionary keys, and the indices of the array as the dictionary values\n d = dict([(nums[i],i) for i in range(len(nums))])\n # iterate through the array\n for n in range(len(nums)):\n # find the difference between the target number and the integer in the array\n dif = target - nums[n]\n # find the difference as a key in the dictionary, be careful that the dictionary's value is not the same as the array's indice (can happen when the difference is half of the target number, but there are not two halves in the array)\n if dif in d and d[dif] != n:\n # if found, return the two indices of the numbers that add up to the target number\n return (n,d[dif])\n # just in case there is no solution, even though the problem allows for the assumption that there is always one solution\n return (\"No solution available\")\n\n# initilize a test case\ns = Solution()\nnums = [7,2,7,15]\ntarget = 14\na = s.twoSum(nums,target)\nprint(a)\n\n# create a dictionary that stores the indices as the keys and the integers as the values\n# iterate through the array, attempting to find the target minus the integer as a key in the dictionary\n# return the indice of the integer and the value of the key\n# watch out for arrays that involve duplicates, such as [3,3,7,2], target 6",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BinarySearchTreeNode:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BinarySearchTreeNode:
def __init__(self, node_data):
self.data = node_data
self.left = None
self.right = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BinarySearchTreeNode:
def __init__(self, node_data):
self.data = node_data
self.left = None
self.right = None
def bst_contains(root: BinarySearchTreeNode, number):
if root is None:
return 0
if root.data == number:
return 1
elif root.data < number:
return bst_contains(root.right, number)
elif root.data > number:
return bst_contains(root.left, number)
<|reserved_special_token_1|>
class BinarySearchTreeNode:
    """A single node of a binary search tree."""

    def __init__(self, node_data):
        # Payload value used for BST ordering comparisons.
        self.data = node_data
        # Child links; None until children are attached.
        self.left = None
        self.right = None
def bst_contains(root: BinarySearchTreeNode, number):
    """Return 1 when *number* occurs in the BST rooted at *root*, else 0."""
    if root is None:
        return 0
    if root.data == number:
        return 1
    elif root.data < number:
        # Value is larger than this node, so it can only live in the right
        # subtree. The `return` is essential: it propagates the recursive
        # result back up the call chain.
        return bst_contains(root.right, number)
    elif root.data > number:
        # Value is smaller: search the left subtree.
        return bst_contains(root.left, number)
|
flexible
|
{
"blob_id": "3bdf3a48451b83347a6c9a9851b5b85b608f0b63",
"index": 2826,
"step-1": "<mask token>\n",
"step-2": "class BinarySearchTreeNode:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\n<mask token>\n",
"step-4": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n if root.data == number:\n return 1\n elif root.data < number:\n return bst_contains(root.right, number)\n elif root.data > number:\n return bst_contains(root.left, number)\n",
"step-5": "class BinarySearchTreeNode:\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n\n if(root.data == number):\n return 1\n elif(root.data < number):\n #si int es mas grande que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.right, number)\n\n elif(root.data > number):\n #si int es mas pequeno que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.left, number)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Use a tuple rather than a list so the set of choices cannot be mutated.
chose_from_two = ('A', 'B', 'C')

answer = []
answer.append('A')
answer.append('C')
print(chose_from_two)
# ('A', 'B', 'C')
print(answer)
# ['A', 'C']
|
normal
|
{
"blob_id": "70b26052d9516fd067ff71074a6dc4c58ace7d80",
"index": 5513,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nanswer.append('A')\nanswer.append('C')\nprint(chose_from_two)\nprint(answer)\n",
"step-3": "chose_from_two = 'A', 'B', 'C'\nanswer = []\nanswer.append('A')\nanswer.append('C')\nprint(chose_from_two)\nprint(answer)\n",
"step-4": "# 選択肢が書き換えられないようにlistではなくtupleを使う\nchose_from_two = ('A', 'B', 'C')\n\nanswer = []\nanswer.append('A')\nanswer.append('C')\nprint(chose_from_two)\n# ('A', 'B', 'C')\nprint(answer)\n# ['A', 'C']",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# NOTE: `input = open('in.txt')` and `output = open('out.py', 'w+')` used to
# live here. They shadowed the `input` builtin and leaked both file handles,
# and the `with` statement below reopens the same files anyway, so the two
# dead statements were removed.
def opstr(op):
    """Map a wire-spec operator keyword onto the equivalent Python operator."""
    table = {
        'RSHIFT': '>>',
        'LSHIFT': '<<',
        'OR': '|',
        'AND': '&',
        'NOT': '~',
    }
    if op in table:
        return table[op]
    raise RuntimeError('Unknown {0}'.format(op))
def funstr(fun):
    """Return the generated-function name for wire *fun*."""
    return fun + '_fn'
def topython(line):
    """Compile one ``EXPR -> wire`` spec line into the source of a memoising
    Python function named ``<wire>_fn``.

    Uppercase tokens become operators, lowercase tokens become calls to other
    wire functions, and anything else (numeric literals) is passed through
    unchanged.
    """
    # Bug fix: the old `line.strip()` here discarded its result (str is
    # immutable), so the dead statement was removed; both halves of the
    # split are stripped individually below.
    body, result = line.split('->')
    fun_name = funstr(result.strip())
    # Template: each wire function caches its value in the shared `precalc`
    # dict so every wire of the circuit is evaluated at most once.
    fun_body = '''
def {name}():
    result = precalc.get(\'{name}\')
    if result is None:
        result = {calc}
        precalc[\'{name}\'] = result
    return result
'''
    calc = str()
    for part in body.strip().split(' '):
        if part.isupper():
            calc += opstr(part) + ' '
        elif part.islower():
            calc += funstr(part) + '() '
        else:
            calc += part + ' '
    return fun_body.format(name=fun_name, calc=calc)
# Translate every wire spec in in.txt into a memoised Python function and
# write the result to out.py, ending with a line that prints wire 'a'.
# NOTE(review): `input` shadows the builtin of the same name inside this block.
with open('in.txt') as input, open('out.py', 'w+') as output:
    output.write('precalc = dict()')
    for line in input:
        output.write(topython(line) + '\n')
    output.write('print(a_fn())\n')
# Importing the generated module executes it, printing the value of wire 'a'.
import out
|
normal
|
{
"blob_id": "e68588dff0e54fa03dbb1c629c39d8312a0df26d",
"index": 7230,
"step-1": "<mask token>\n\n\ndef opstr(op):\n if op == 'RSHIFT':\n return '>>'\n if op == 'LSHIFT':\n return '<<'\n if op == 'OR':\n return '|'\n if op == 'AND':\n return '&'\n if op == 'NOT':\n return '~'\n raise RuntimeError('Unknown {0}'.format(op))\n\n\ndef funstr(fun):\n return '{0}_fn'.format(fun)\n\n\ndef topython(line):\n line.strip()\n body, result = line.split('->')\n fun_name = funstr(result.strip())\n fun_body = \"\"\"\ndef {name}():\n result = precalc.get('{name}')\n if result is None:\n result = {calc}\n precalc['{name}'] = result\n return result\n\"\"\"\n calc = str()\n for part in body.strip().split(' '):\n if part.isupper():\n calc += opstr(part) + ' '\n elif part.islower():\n calc += funstr(part) + '() '\n else:\n calc += part + ' '\n return fun_body.format(name=fun_name, calc=calc)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef opstr(op):\n if op == 'RSHIFT':\n return '>>'\n if op == 'LSHIFT':\n return '<<'\n if op == 'OR':\n return '|'\n if op == 'AND':\n return '&'\n if op == 'NOT':\n return '~'\n raise RuntimeError('Unknown {0}'.format(op))\n\n\ndef funstr(fun):\n return '{0}_fn'.format(fun)\n\n\ndef topython(line):\n line.strip()\n body, result = line.split('->')\n fun_name = funstr(result.strip())\n fun_body = \"\"\"\ndef {name}():\n result = precalc.get('{name}')\n if result is None:\n result = {calc}\n precalc['{name}'] = result\n return result\n\"\"\"\n calc = str()\n for part in body.strip().split(' '):\n if part.isupper():\n calc += opstr(part) + ' '\n elif part.islower():\n calc += funstr(part) + '() '\n else:\n calc += part + ' '\n return fun_body.format(name=fun_name, calc=calc)\n\n\nwith open('in.txt') as input, open('out.py', 'w+') as output:\n output.write('precalc = dict()')\n for line in input:\n output.write(topython(line) + '\\n')\n output.write('print(a_fn())\\n')\n<mask token>\n",
"step-3": "input = open('in.txt')\noutput = open('out.py', 'w+')\n\n\ndef opstr(op):\n if op == 'RSHIFT':\n return '>>'\n if op == 'LSHIFT':\n return '<<'\n if op == 'OR':\n return '|'\n if op == 'AND':\n return '&'\n if op == 'NOT':\n return '~'\n raise RuntimeError('Unknown {0}'.format(op))\n\n\ndef funstr(fun):\n return '{0}_fn'.format(fun)\n\n\ndef topython(line):\n line.strip()\n body, result = line.split('->')\n fun_name = funstr(result.strip())\n fun_body = \"\"\"\ndef {name}():\n result = precalc.get('{name}')\n if result is None:\n result = {calc}\n precalc['{name}'] = result\n return result\n\"\"\"\n calc = str()\n for part in body.strip().split(' '):\n if part.isupper():\n calc += opstr(part) + ' '\n elif part.islower():\n calc += funstr(part) + '() '\n else:\n calc += part + ' '\n return fun_body.format(name=fun_name, calc=calc)\n\n\nwith open('in.txt') as input, open('out.py', 'w+') as output:\n output.write('precalc = dict()')\n for line in input:\n output.write(topython(line) + '\\n')\n output.write('print(a_fn())\\n')\n<mask token>\n",
"step-4": "input = open('in.txt')\noutput = open('out.py', 'w+')\n\n\ndef opstr(op):\n if op == 'RSHIFT':\n return '>>'\n if op == 'LSHIFT':\n return '<<'\n if op == 'OR':\n return '|'\n if op == 'AND':\n return '&'\n if op == 'NOT':\n return '~'\n raise RuntimeError('Unknown {0}'.format(op))\n\n\ndef funstr(fun):\n return '{0}_fn'.format(fun)\n\n\ndef topython(line):\n line.strip()\n body, result = line.split('->')\n fun_name = funstr(result.strip())\n fun_body = \"\"\"\ndef {name}():\n result = precalc.get('{name}')\n if result is None:\n result = {calc}\n precalc['{name}'] = result\n return result\n\"\"\"\n calc = str()\n for part in body.strip().split(' '):\n if part.isupper():\n calc += opstr(part) + ' '\n elif part.islower():\n calc += funstr(part) + '() '\n else:\n calc += part + ' '\n return fun_body.format(name=fun_name, calc=calc)\n\n\nwith open('in.txt') as input, open('out.py', 'w+') as output:\n output.write('precalc = dict()')\n for line in input:\n output.write(topython(line) + '\\n')\n output.write('print(a_fn())\\n')\nimport out\n",
"step-5": "input = open('in.txt')\noutput = open('out.py', 'w+')\n\ndef opstr(op):\n if op == 'RSHIFT': return '>>'\n if op == 'LSHIFT': return '<<'\n if op == 'OR': return '|'\n if op == 'AND': return '&'\n if op == 'NOT': return '~'\n raise RuntimeError('Unknown {0}'.format(op))\n\n\ndef funstr(fun):\n return '{0}_fn'.format(fun)\n\n\ndef topython(line):\n line.strip()\n body, result = line.split('->')\n fun_name = funstr(result.strip())\n fun_body = '''\ndef {name}():\n result = precalc.get(\\'{name}\\')\n if result is None:\n result = {calc}\n precalc[\\'{name}\\'] = result\n return result\n'''\n calc = str()\n for part in body.strip().split(' '):\n if part.isupper():\n calc += opstr(part) + ' '\n elif part.islower():\n calc += funstr(part) + '() '\n else:\n calc += part + ' '\n \n return fun_body.format(name=fun_name, calc=calc) \n\nwith open('in.txt') as input, open('out.py', 'w+') as output:\n output.write('precalc = dict()')\n for line in input:\n output.write(topython(line) + '\\n')\n\n output.write('print(a_fn())\\n')\n\nimport out\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class TestTiming(TestCase):
def test_decompose_ns(self):
duration: int = 234
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
<|reserved_special_token_0|>
def test_decompose_ms(self):
duration: int = 1023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,
'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_parse_decomposed_duration_ns(self):
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
<|reserved_special_token_0|>
def test_parse_decomposed_duration_ms(self):
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,
'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_parse_decomposed_duration_h(self):
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,
's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
duration_ns: int = 7995125885088
formatted_duration: str = format_duration(duration_ns)
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTiming(TestCase):
def test_decompose_ns(self):
duration: int = 234
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
<|reserved_special_token_0|>
def test_decompose_ms(self):
duration: int = 1023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,
'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
duration: int = 45001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,
'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
duration: int = 65001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,
's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
duration: int = 7995125885088
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,
'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
<|reserved_special_token_0|>
def test_parse_decomposed_duration_ms(self):
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,
'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,
'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
<|reserved_special_token_0|>
def test_parse_decomposed_duration_h(self):
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,
's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
duration_ns: int = 7995125885088
formatted_duration: str = format_duration(duration_ns)
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestTiming(TestCase):
def test_decompose_ns(self):
duration: int = 234
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
<|reserved_special_token_0|>
def test_decompose_ms(self):
duration: int = 1023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,
'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
duration: int = 45001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,
'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
duration: int = 65001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,
's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
duration: int = 7995125885088
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,
'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
<|reserved_special_token_0|>
def test_parse_decomposed_duration_ms(self):
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,
'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,
'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,
'ms'), (23, 'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,
's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
duration_ns: int = 7995125885088
formatted_duration: str = format_duration(duration_ns)
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from typing import List, Tuple
from unittest import TestCase
from solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration
class TestTiming(TestCase):
def test_decompose_ns(self):
duration: int = 234
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_us(self):
duration: int = 23456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456,
'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_ms(self):
duration: int = 1023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,
'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
duration: int = 45001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,
'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
duration: int = 65001023456
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,
's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
duration: int = 7995125885088
decomposition: List[Tuple[int, str]] = decompose(duration)
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,
'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_us(self):
decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '23.456 μs'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_ms(self):
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,
'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,
'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,
'ms'), (23, 'μs'), (456, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,
's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
parsed_duration: str = parse_decomposed_duration(decomposition)
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
duration_ns: int = 7995125885088
formatted_duration: str = format_duration(duration_ns)
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
def test_format_duration_us(self):
duration_ns: int = 23456
formatted_duration: str = format_duration(duration_ns)
expected_formatted_duration: str = '23.456 μs'
self.assertEqual(expected_formatted_duration, formatted_duration)
<|reserved_special_token_1|>
from typing import List, Tuple
from unittest import TestCase
from solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration
class TestTiming(TestCase):
def test_decompose_ns(self):
# Given
duration: int = 234
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_us(self):
# Given
duration: int = 23456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_ms(self):
# Given
duration: int = 1023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_s(self):
# Given
duration: int = 45001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_min(self):
# Given
duration: int = 65001023456
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_decompose_h(self):
# Given
duration: int = 7995125885088
# When
decomposition: List[Tuple[int, str]] = decompose(duration)
# Then
expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),
(125, 'ms'), (885, 'μs'), (88, 'ns')]
self.assertListEqual(expected_decomposition, decomposition)
def test_parse_decomposed_duration_ns(self):
# Given
decomposition: List[Tuple[int, str]] = [(234, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '234 ns'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_us(self):
# Given
decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '23.456 μs'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_ms(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1.023 ms'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_s(self):
# Given
decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '45.001 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_min(self):
# Given
decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '1 min 5 s'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_parse_decomposed_duration_h(self):
# Given
decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]
# When
parsed_duration: str = parse_decomposed_duration(decomposition)
# Then
expected_parsed_duration: str = '2 h 13 min'
self.assertEqual(expected_parsed_duration, parsed_duration)
def test_format_duration_h(self):
# Given
duration_ns: int = 7995125885088
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '2 h 13 min'
self.assertEqual(expected_formatted_duration, formatted_duration)
def test_format_duration_us(self):
# Given
duration_ns: int = 23456
# When
formatted_duration: str = format_duration(duration_ns)
# Then
expected_formatted_duration: str = '23.456 μs'
self.assertEqual(expected_formatted_duration, formatted_duration)
|
flexible
|
{
"blob_id": "afecbb46a98fbf6b5c26f5b6c8026aec035fadf1",
"index": 6696,
"step-1": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = 
'1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n <mask token>\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n <mask token>\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = 
'1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n <mask token>\n",
"step-4": "from typing import List, Tuple\nfrom unittest import TestCase\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n duration: int = 234\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n duration: int = 23456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456,\n 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n duration: int = 1023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n duration: int = 45001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n duration: int = 65001023456\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5,\n 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n duration: int = 7995125885088\n decomposition: List[Tuple[int, str]] = decompose(duration)\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13,\n 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n parsed_duration: str = 
parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456,\n 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23,\n 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1,\n 'ms'), (23, 'μs'), (456, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15,\n 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n parsed_duration: str = parse_decomposed_duration(decomposition)\n expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n duration_ns: int = 7995125885088\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def 
test_format_duration_us(self):\n duration_ns: int = 23456\n formatted_duration: str = format_duration(duration_ns)\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-5": "from typing import List, Tuple\nfrom unittest import TestCase\n\nfrom solutions.python.common.timing import decompose, parse_decomposed_duration, format_duration\n\n\nclass TestTiming(TestCase):\n\n def test_decompose_ns(self):\n # Given\n duration: int = 234\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_us(self):\n # Given\n duration: int = 23456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_ms(self):\n # Given\n duration: int = 1023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_s(self):\n # Given\n duration: int = 45001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_min(self):\n # Given\n duration: int = 65001023456\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n self.assertListEqual(expected_decomposition, decomposition)\n\n def test_decompose_h(self):\n # Given\n duration: int = 7995125885088\n\n # When\n decomposition: List[Tuple[int, str]] = decompose(duration)\n\n # Then\n expected_decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'),\n (125, 'ms'), (885, 'μs'), (88, 'ns')]\n 
self.assertListEqual(expected_decomposition, decomposition)\n\n def test_parse_decomposed_duration_ns(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(234, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '234 ns'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_us(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '23.456 μs'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_ms(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1.023 ms'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_s(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(45, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '45.001 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_min(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(1, 'min'), (5, 's'), (1, 'ms'), (23, 'μs'), (456, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n expected_parsed_duration: str = '1 min 5 s'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_parse_decomposed_duration_h(self):\n # Given\n decomposition: List[Tuple[int, str]] = [(2, 'h'), (13, 'min'), (15, 's'), (125, 'ms'), (885, 'μs'), (88, 'ns')]\n\n # When\n parsed_duration: str = parse_decomposed_duration(decomposition)\n\n # Then\n 
expected_parsed_duration: str = '2 h 13 min'\n self.assertEqual(expected_parsed_duration, parsed_duration)\n\n def test_format_duration_h(self):\n # Given\n duration_ns: int = 7995125885088\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '2 h 13 min'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n\n def test_format_duration_us(self):\n # Given\n duration_ns: int = 23456\n\n # When\n formatted_duration: str = format_duration(duration_ns)\n\n # Then\n expected_formatted_duration: str = '23.456 μs'\n self.assertEqual(expected_formatted_duration, formatted_duration)\n",
"step-ids": [
7,
11,
12,
16,
17
]
}
|
[
7,
11,
12,
16,
17
] |
<|reserved_special_token_0|>
class ShanghaiTechPartA(Dataset):
def __init__(self, root, shuffle=False, transform=None, downsample=1):
self.root = root
self.shuffle = shuffle
self.transform = transform
self.downsample = downsample
self.image_names = [filename for filename in os.listdir(self.root)]
self.n_samples = len(self.image_names)
if self.shuffle:
random.shuffle(self.image_names)
<|reserved_special_token_0|>
def __getitem__(self, index):
assert index <= len(self), 'index range error'
img_name = self.image_names[index]
img = plt.imread(os.path.join(self.root, img_name)) / 255
if len(img.shape) == 2:
img = img[:, :, np.newaxis]
img = np.concatenate((img, img, img), 2)
density_map = np.load(os.path.join(self.root.replace('images',
'density_maps'), img_name.replace('.jpg', '.npy')))
if self.downsample > 1:
rows = int(img.shape[0] // self.downsample)
cols = int(img.shape[1] // self.downsample)
img = cv2.resize(img, (cols * self.downsample, rows * self.
downsample))
img = img.transpose((2, 0, 1))
density_map = cv2.resize(density_map, (cols, rows))
density_map = density_map[np.newaxis, :, :
] * self.downsample * self.downsample
img_tensor = torch.tensor(img, dtype=torch.float)
density_map_tensor = torch.tensor(density_map, dtype=torch.float)
if self.transform is not None:
img_tensor = self.transform(img_tensor)
return img_tensor, density_map_tensor
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShanghaiTechPartA(Dataset):
    """PyTorch dataset for ShanghaiTech Part A crowd counting.

    Yields (image_tensor, density_map_tensor) pairs. Images are read from
    `root`; ground-truth density maps come from the sibling directory
    obtained by replacing 'images' with 'density_maps' in `root`
    (assumes 'images' occurs in the path -- confirm).
    """

    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        # root -- directory holding the .jpg images.
        # shuffle -- randomize the sample order once, at construction.
        # transform -- optional callable applied to the image tensor only.
        # downsample -- density map comes out at 1/downsample resolution.
        self.root = root
        self.shuffle = shuffle
        self.transform = transform
        self.downsample = downsample
        self.image_names = [filename for filename in os.listdir(self.root)]
        self.n_samples = len(self.image_names)
        if self.shuffle:
            random.shuffle(self.image_names)

    def __len__(self):
        # Number of image files found under root.
        return self.n_samples

    def __getitem__(self, index):
        # NOTE(review): guard should arguably be `index < len(self)`; an
        # index equal to len(self) passes the assert and the list lookup
        # below raises IndexError instead.
        assert index <= len(self), 'index range error'
        img_name = self.image_names[index]
        # Read the image and scale pixels to [0, 1].
        img = plt.imread(os.path.join(self.root, img_name)) / 255
        # Replicate grayscale images to three channels.
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), 2)
        # Ground-truth density map stored as <name>.npy under density_maps/.
        density_map = np.load(os.path.join(self.root.replace('images',
            'density_maps'), img_name.replace('.jpg', '.npy')))
        if self.downsample > 1:
            # Snap the image to a multiple of `downsample`; shrink the
            # density map by the same factor and rescale its values --
            # presumably to preserve the summed head count (confirm).
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            img = cv2.resize(img, (cols * self.downsample, rows * self.
                downsample))
            img = img.transpose((2, 0, 1))
            density_map = cv2.resize(density_map, (cols, rows))
            density_map = density_map[np.newaxis, :, :
                ] * self.downsample * self.downsample
        # NOTE(review): when downsample <= 1 the image is never transposed
        # to CHW -- confirm callers always pass downsample > 1.
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)
        return img_tensor, density_map_tensor
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShanghaiTechPartA(Dataset):
    """PyTorch dataset for ShanghaiTech Part A crowd counting.

    Yields (image_tensor, density_map_tensor) pairs. Images are read from
    `root`; ground-truth density maps come from the sibling directory
    obtained by replacing 'images' with 'density_maps' in `root`
    (assumes 'images' occurs in the path -- confirm).
    """

    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        # root -- directory holding the .jpg images.
        # shuffle -- randomize the sample order once, at construction.
        # transform -- optional callable applied to the image tensor only.
        # downsample -- density map comes out at 1/downsample resolution.
        self.root = root
        self.shuffle = shuffle
        self.transform = transform
        self.downsample = downsample
        self.image_names = [filename for filename in os.listdir(self.root)]
        self.n_samples = len(self.image_names)
        if self.shuffle:
            random.shuffle(self.image_names)

    def __len__(self):
        # Number of image files found under root.
        return self.n_samples

    def __getitem__(self, index):
        # NOTE(review): guard should arguably be `index < len(self)`; an
        # index equal to len(self) passes the assert and the list lookup
        # below raises IndexError instead.
        assert index <= len(self), 'index range error'
        img_name = self.image_names[index]
        # Read the image and scale pixels to [0, 1].
        img = plt.imread(os.path.join(self.root, img_name)) / 255
        # Replicate grayscale images to three channels.
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), 2)
        # Ground-truth density map stored as <name>.npy under density_maps/.
        density_map = np.load(os.path.join(self.root.replace('images',
            'density_maps'), img_name.replace('.jpg', '.npy')))
        if self.downsample > 1:
            # Snap the image to a multiple of `downsample`; shrink the
            # density map by the same factor and rescale its values --
            # presumably to preserve the summed head count (confirm).
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            img = cv2.resize(img, (cols * self.downsample, rows * self.
                downsample))
            img = img.transpose((2, 0, 1))
            density_map = cv2.resize(density_map, (cols, rows))
            density_map = density_map[np.newaxis, :, :
                ] * self.downsample * self.downsample
        # NOTE(review): when downsample <= 1 the image is never transposed
        # to CHW -- confirm callers always pass downsample > 1.
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)
        return img_tensor, density_map_tensor
# Smoke test: load one random sample and print its shapes.
if __name__ == '__main__':
    root = (
        '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'
        )
    # ImageNet normalization stats; density maps at 1/8 resolution.
    dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[
        0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)
    # NOTE(review): randint's upper bound is inclusive, so index can equal
    # len(dataset) and overrun the dataset -- should be len(dataset) - 1.
    index = random.randint(0, len(dataset))
    img, dmap = dataset[index]
    print(index, img.shape, dmap.shape)
<|reserved_special_token_1|>
import cv2
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class ShanghaiTechPartA(Dataset):
    """PyTorch dataset for ShanghaiTech Part A crowd counting.

    Yields (image_tensor, density_map_tensor) pairs. Images are read from
    `root`; ground-truth density maps come from the sibling directory
    obtained by replacing 'images' with 'density_maps' in `root`
    (assumes 'images' occurs in the path -- confirm).
    """

    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        # root -- directory holding the .jpg images.
        # shuffle -- randomize the sample order once, at construction.
        # transform -- optional callable applied to the image tensor only.
        # downsample -- density map comes out at 1/downsample resolution.
        self.root = root
        self.shuffle = shuffle
        self.transform = transform
        self.downsample = downsample
        self.image_names = [filename for filename in os.listdir(self.root)]
        self.n_samples = len(self.image_names)
        if self.shuffle:
            random.shuffle(self.image_names)

    def __len__(self):
        # Number of image files found under root.
        return self.n_samples

    def __getitem__(self, index):
        # NOTE(review): guard should arguably be `index < len(self)`; an
        # index equal to len(self) passes the assert and the list lookup
        # below raises IndexError instead.
        assert index <= len(self), 'index range error'
        img_name = self.image_names[index]
        # Read the image and scale pixels to [0, 1].
        img = plt.imread(os.path.join(self.root, img_name)) / 255
        # Replicate grayscale images to three channels.
        if len(img.shape) == 2:
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), 2)
        # Ground-truth density map stored as <name>.npy under density_maps/.
        density_map = np.load(os.path.join(self.root.replace('images',
            'density_maps'), img_name.replace('.jpg', '.npy')))
        if self.downsample > 1:
            # Snap the image to a multiple of `downsample`; shrink the
            # density map by the same factor and rescale its values --
            # presumably to preserve the summed head count (confirm).
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            img = cv2.resize(img, (cols * self.downsample, rows * self.
                downsample))
            img = img.transpose((2, 0, 1))
            density_map = cv2.resize(density_map, (cols, rows))
            density_map = density_map[np.newaxis, :, :
                ] * self.downsample * self.downsample
        # NOTE(review): when downsample <= 1 the image is never transposed
        # to CHW -- confirm callers always pass downsample > 1.
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)
        return img_tensor, density_map_tensor
# Smoke test: load one random sample and print its shapes.
if __name__ == '__main__':
    root = (
        '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'
        )
    # ImageNet normalization stats; density maps at 1/8 resolution.
    dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[
        0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)
    # NOTE(review): randint's upper bound is inclusive, so index can equal
    # len(dataset) and overrun the dataset -- should be len(dataset) - 1.
    index = random.randint(0, len(dataset))
    img, dmap = dataset[index]
    print(index, img.shape, dmap.shape)
<|reserved_special_token_1|>
import cv2
import random
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision import transforms
class ShanghaiTechPartA(Dataset):
    """PyTorch dataset for ShanghaiTech Part A crowd counting.

    Yields (image_tensor, density_map_tensor) pairs. Images are read from
    `root`; density maps come from the sibling directory obtained by
    replacing 'images' with 'density_maps' in `root` (assumes 'images'
    occurs in the path -- confirm).
    """
    def __init__(self, root, shuffle=False, transform=None, downsample=1):
        # root -- directory holding the .jpg images.
        # shuffle -- randomize the sample order once, at construction.
        # transform -- optional callable applied to the image tensor only.
        # downsample -- density map comes out at 1/downsample resolution.
        self.root = root
        self.shuffle = shuffle
        self.transform = transform
        self.downsample = downsample
        self.image_names = [filename for filename in os.listdir(self.root)]
        self.n_samples = len(self.image_names)
        if self.shuffle:
            random.shuffle(self.image_names)
    def __len__(self):
        # Number of image files found under root.
        return self.n_samples
    def __getitem__(self, index):
        # NOTE(review): guard should arguably be `index < len(self)`; an
        # index equal to len(self) passes the assert and the list lookup
        # below raises IndexError instead.
        assert index <= len(self), 'index range error'
        img_name = self.image_names[index]
        # Read image and normalize its pixels to [0,1]
        img = plt.imread(os.path.join(self.root,img_name)) / 255
        # Expand grayscale image to three channels.
        if len(img.shape) == 2:
            img = img[:,:,np.newaxis]
            img = np.concatenate((img,img,img),2)
        # Read ground-truth density map (<name>.npy under density_maps/).
        density_map = np.load(os.path.join(self.root.replace('images','density_maps'),img_name.replace('.jpg','.npy')))
        # Downsample image and density-map to match the model's output stride.
        if self.downsample >1:
            rows = int(img.shape[0] // self.downsample)
            cols = int(img.shape[1] // self.downsample)
            img = cv2.resize(img,(cols*self.downsample, rows*self.downsample))
            img = img.transpose((2,0,1)) # convert to order (channel,rows,cols)
            density_map = cv2.resize(density_map, (cols,rows))
            # Rescale values after shrinking -- presumably to preserve the
            # summed head count; confirm against training code.
            density_map = density_map[np.newaxis,:,:] * self.downsample * self.downsample
        # NOTE(review): when downsample <= 1 the image is never transposed
        # to CHW -- confirm callers always pass downsample > 1.
        # transform image and density_map to tensors
        img_tensor = torch.tensor(img, dtype=torch.float)
        density_map_tensor = torch.tensor(density_map, dtype=torch.float)
        # Apply any optional transform to the image tensor only.
        if self.transform is not None:
            img_tensor = self.transform(img_tensor)
        return img_tensor, density_map_tensor
# Smoke test: load one random sample and print its shapes.
if __name__== "__main__":
    root = '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'
    # ImageNet normalization stats; density maps at 1/8 resolution.
    dataset = ShanghaiTechPartA(root,
                                transform=transforms.Normalize(
                                    mean=[0.485, 0.456, 0.406],
                                    std=[0.229, 0.224, 0.225]),
                                downsample=8)
    # NOTE(review): randint's upper bound is inclusive, so index can equal
    # len(dataset) and overrun the dataset -- should be len(dataset) - 1.
    index = random.randint(0, len(dataset))
    img, dmap = dataset[index]
    print(index, img.shape, dmap.shape)
|
flexible
|
{
"blob_id": "8a0a98ab072e46463d80d8638c830e6db0032a77",
"index": 8101,
"step-1": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n <mask token>\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-4": "import cv2\nimport random\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-5": "import cv2\nimport random\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n # Read image and normalize its pixels to [0,1]\n img = plt.imread(os.path.join(self.root,img_name)) / 255\n # Expand grayscale image to three channel.\n if len(img.shape) == 2:\n img = img[:,:,np.newaxis]\n img = np.concatenate((img,img,img),2)\n\n # Read ground truth density-map\n density_map = np.load(os.path.join(self.root.replace('images','density_maps'),img_name.replace('.jpg','.npy')))\n\n # Downsample image and density-map to match model's input\n if self.downsample >1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img,(cols*self.downsample, rows*self.downsample))\n img = img.transpose((2,0,1)) # convert to order (channel,rows,cols)\n density_map = cv2.resize(density_map, (cols,rows))\n density_map = density_map[np.newaxis,:,:] * self.downsample * self.downsample\n # transform image and density_map to tensors\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n # Apply any other transformation\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n\n return img_tensor, density_map_tensor\n\n\n# Test code\nif __name__== \"__main__\":\n root = 
'/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n dataset = ShanghaiTechPartA(root,\n transform=transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Script: coverage.py
Identifies domains that only occur in multi-domain proteins. The main
script is master.
--------------------
Felix A Kruger
momo.sander@ebi.ac.uk
"""
####
#### import modules.
####
import queryDevice
import operator
import yaml
import time
####
#### Load parameters.
####
# Read the YAML config once at import time; `params` is used module-wide.
# Context manager guarantees the handle is closed even if parsing raises.
with open('local.yaml') as paramFile:
    params = yaml.safe_load(paramFile)
#### Define functions.
#-----------------------------------------------------------------------------------------------------------------------
def get_el_targets(params):
    """Query ChEMBL for all activities that are subject to the mapping.

    Does not cover activities expressed in log conversion, e.g. pIC50.
    Works with chembl_15 upwards. Only binding ('B') assays with direct
    ('D') relationship, '=' relation, nM units and a value below
    params['threshold'] * 1000 nM are counted.

    Returns a list of tuples
    [(tid, target_type, domain_count, assay_count, act_count), ...].
    """
    # Inner subquery: per-target Pfam domain counts for single proteins
    # and protein complexes; outer query: per-target assay/activity counts.
    data = queryDevice.queryDevice("""
    SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)
    FROM assays ass
    JOIN(
    SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc
    FROM target_dictionary td
    JOIN target_components tc
    ON tc.tid = td.tid
    JOIN component_sequences cs
    ON cs.component_id = tc.component_id
    JOIN component_domains cd
    ON cd.component_id = cs.component_id
    WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')
    GROUP BY td.tid
    ) as dc
    ON dc.tid = ass.tid
    JOIN activities act
    ON act.assay_id = ass.assay_id
    WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')
    AND ass.relationship_type = 'D'
    AND assay_type IN('B')
    AND act.standard_relation IN('=')
    AND standard_units = 'nM'
    AND standard_value <= %s
    GROUP BY dc.tid ORDER BY COUNT(activity_id)""" % (int(params['threshold']) * 1000) , params)
    print "retrieved data for ", len(data), "tids."
    return data
#-----------------------------------------------------------------------------------------------------------------------
def readfile(path, key_name, val_name):
    """Read two named columns of a tab-separated file into a dictionary.

    Inputs:
    path -- filepath
    key_name -- header name of the column holding the keys
    val_name -- header name of the column holding the values
                (may be the same column as key_name)

    Returns {key: value} for every data row. Later rows with a duplicate
    key overwrite earlier ones. Raises ValueError if either column name
    is missing from the header line.
    """
    # Context manager closes the file even if parsing below fails.
    with open(path, 'r') as infile:
        lines = infile.readlines()
    key_idx = None
    val_idx = None
    for i, el in enumerate(lines[0].rstrip().split('\t')):
        if el == key_name:
            key_idx = i
        if el == val_name:
            val_idx = i
    # BUG FIX: previously a missing column produced a confusing NameError;
    # fail explicitly instead.
    if key_idx is None or val_idx is None:
        raise ValueError("column(s) not found in header: %s, %s" % (key_name, val_name))
    lkp = {}
    for line in lines[1:]:
        elements = line.rstrip().split('\t')
        lkp[elements[key_idx]] = elements[val_idx]
    return lkp
#-----------------------------------------------------------------------------------------------------------------------
def get_archs(el_targets, pfam_lkp):
    """Group eligible targets by their (sorted) domain architecture.

    Inputs:
    el_targets -- list of tuples
                  (tid, target_type, domain_count, assay_count, act_count).
    pfam_lkp -- dict mapping tid -> list of Pfam domain names.

    Returns (arch_lkp, dom_lkp, act_lkp):
    arch_lkp -- architecture string -> number of targets with it.
    dom_lkp -- domain -> number of multi-domain targets containing it.
    act_lkp -- architecture string -> summed activity count.
    """
    act_lkp = {}
    arch_lkp = {}
    dom_lkp = {}
    for ent in el_targets:
        try:
            doms = pfam_lkp[ent[0]]
        except KeyError:
            # NOTE(review): this branch only logs -- `doms` then still
            # holds the PREVIOUS target's domains (or is unbound on the
            # first iteration, raising NameError below). A `continue`
            # looks like the intent; confirm before changing.
            print "no doms in ", ent[0]
        # Canonical architecture key: alphabetically sorted domain list.
        arch = ', '.join(sorted(doms))
        try:
            arch_lkp[arch] += 1
            act_lkp[arch] += ent[4]
        except KeyError:
            arch_lkp[arch] = 1
            act_lkp[arch] = ent[4]
        # Single-domain targets are tallied above but do not contribute
        # to the per-domain counts.
        if len(doms) <= 1:
            continue
        for dom in set(doms):
            try:
                dom_lkp[dom] += 1
            except KeyError:
                dom_lkp[dom] = 1
    return(arch_lkp, dom_lkp, act_lkp)
#-----------------------------------------------------------------------------------------------------------------------
def get_doms(tids, params):
    """Look up the Pfam-A domain names for each target id.

    Inputs:
    tids -- iterable of target ids (tid).
    params -- connection parameters passed through to queryDevice.

    Returns a dict mapping tid -> list of domain names.
    """
    tidstr = "', '".join(str(t) for t in tids)
    data = queryDevice.queryDevice("""
    SELECT tid, domain_name
    FROM target_components tc
    JOIN component_domains cd
    ON cd.component_id = tc.component_id
    JOIN domains d
    ON d.domain_id = cd.domain_id
    WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'""" %tidstr, params)
    # Group the (tid, domain_name) rows into per-target lists.
    pfam_lkp = {}
    for tid, dom in data:
        pfam_lkp.setdefault(tid, []).append(dom)
    return pfam_lkp
#-----------------------------------------------------------------------------------------------------------------------
def count_valid(lkp, valid_doms):
    """Append coverage statistics for `lkp` to data/log.tab.

    Inputs:
    lkp -- dict mapping architecture string -> count (architecture or
           activity counts, depending on the caller).
    valid_doms -- collection of Pfam domains covered by the mapping.

    An architecture counts as covered when at least one of its domains is
    in `valid_doms`. Writes one tab-separated row (covered count, total
    count, release, threshold, comment, timestamp, covered archs, total
    archs). Relies on the module-level `params` dict.
    """
    valz = []
    for arch in lkp.keys():
        covered = False
        for dom in arch.split(', '):
            if dom in valid_doms:
                covered = True
                break
        valz.append((lkp[arch], covered))
    valid = sum([x[0] for x in valz if x[1]])
    allz = sum([x[0] for x in valz])
    valid_archs = len([x[0] for x in valz if x[1]])
    # BUG FIX: was len(sum([x[0] for x in valz])) -- len() of an int raises
    # TypeError. The intended figure is the total number of architectures.
    all_archs = len(valz)
    timestamp = time.strftime('%d %B %Y %T', time.gmtime())
    comment = "only binding assays"
    release = params['release']
    threshold = params['threshold']
    out = open('data/log.tab', 'a')
    out.write("%(valid)s\t%(allz)s\t%(release)s\t%(threshold)s\t%(comment)s\t%(timestamp)s\t%(valid_archs)s\t%(all_archs)s\n"
                   % locals())
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_archs(arch_lkp, valid_doms, path):
    '''Write multi-domain architectures to a markdown table at <path>.md.

    Inputs:
    arch_lkp -- dict mapping architecture string -> target count.
    valid_doms -- collection of domains covered by the mapping.
    path -- output basename; '.md' is appended.

    Rows are sorted by descending count; single-domain architectures are
    skipped. The 'mapped' column lists the covered domains, or False.
    '''
    # .items() replaces Py2-only .iteritems(); identical under sorted().
    sorted_archs = sorted(arch_lkp.items(), key=operator.itemgetter(1), reverse=True)
    out = open('%s.md' % path, 'w')
    out.write('|architecture|count|mapped|\n')
    out.write('|:-----------|:---------|-----:|\n')
    for arch in sorted_archs:
        doms = str(arch[0]).split(', ')
        if len(doms) <= 1:
            continue
        mapped = ', '.join([x for x in doms if x in valid_doms])
        if len(mapped) == 0:
            mapped = False
        out.write("|%s|%s|%s|\n" % (arch[0], arch[1], mapped))
    # BUG FIX: the handle was never closed; buffered rows could be lost.
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_network(arch_lkp, valid_doms, path):
    '''Write the domain co-occurrence network as a tab-separated edge list.

    Inputs:
    arch_lkp -- dict mapping architecture string -> count.
    valid_doms -- unused; kept for signature consistency with the other
                  exporters.
    path -- output basename; '.tab' is appended.

    Every unordered pair of domains within a multi-domain architecture
    becomes an edge weighted by the summed architecture counts.
    '''
    lkp = {}
    for arch in arch_lkp.keys():
        doms = arch.split(', ')
        # Single-domain architectures contribute no edges.
        # (Removed an unreachable `type(doms) is str` check: split()
        # always returns a list.)
        if len(doms) <= 1:
            continue
        count = arch_lkp[arch]
        for i in range(len(doms) - 1):
            for j in range(i + 1, len(doms)):
                dom_key = ', '.join(sorted([doms[i], doms[j]]))
                try:
                    lkp[dom_key] += count
                except KeyError:
                    lkp[dom_key] = count
    out = open('%s.tab' % path, 'w')
    out.write('dom_1\tdom_2\tcount\n')
    for link in lkp.keys():
        doms = str(link).split(', ')
        out.write("%s\t%s\t%s\n" % (doms[0], doms[1], lkp[link]))
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_attribs(arch_lkp, valid_doms, path):
    """Write a node-attribute table for the domain network to <path>.tab.

    Each domain that occurs in a multi-domain architecture gets one
    'dom<TAB>valid' row stating whether the mapping covers it.
    """
    out = open('%s.tab' % path, 'w')
    out.write('dom\tvalid\n')
    # Collect each domain's coverage flag; later sightings overwrite
    # earlier ones with the same value, so order of archs is irrelevant.
    status = {}
    for arch in arch_lkp:
        members = arch.split(', ')
        if len(members) > 1:
            for member in members:
                status[member] = member in valid_doms
    for dom, flag in status.items():
        out.write("%s\t%s\n" % (dom, flag))
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def export_doms(dom_lkp, valid_doms, path):
    '''Write domains from multi-domain architectures to a markdown table.

    Inputs:
    dom_lkp -- dict mapping domain -> number of multi-domain targets it
               occurs in.
    valid_doms -- collection of domains covered by the mapping.
    path -- output basename; '.md' is appended.

    Rows are sorted by descending count.
    '''
    # .items() replaces Py2-only .iteritems(); identical under sorted().
    sorted_doms = sorted(dom_lkp.items(), key=operator.itemgetter(1), reverse=True)
    out = open('%s.md' % path, 'w')
    out.write('|domain |count| validated|\n')
    out.write('|:-----------|:-----|-------:|\n')
    for dom in sorted_doms:
        mapped = False
        count = dom[1]
        dom = str(dom[0])
        if dom in valid_doms:
            mapped = True
        out.write("|%s|%s|%s|\n" % (dom, count, mapped))
    # BUG FIX: the handle was never closed; buffered rows could be lost.
    out.close()
#-----------------------------------------------------------------------------------------------------------------------
def master(version):
    """Run the full pipeline to identify mandatory multi-domain architectures.

    Inputs:
    version -- version tag of the validated-domain file
               (data/valid_pfam_v_<version>.tab).

    Side effects: appends to data/log.tab and writes markdown/network
    exports under data/ for the configured release.
    """
    # Load the list of validated domains (same column as key and value).
    valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')
    # NOTE: in Py3 this is a dict view, in Py2 a list; both support the
    # membership tests used downstream.
    valid_doms = valid_dom_d.keys()
    ## Load eligible targets from ChEMBL.
    el_targets = get_el_targets(params)
    ## Get Pfam domains for each tid.
    pfam_lkp = get_doms([x[0] for x in el_targets], params)
    ## Group targets by domain architecture.
    (arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)
    ## Count covered architectures.
    count_valid(arch_lkp, valid_doms)
    ## Count covered activities.
    count_valid(act_lkp, valid_doms)
    ## Write multi-domain architectures to markdown tables.
    export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])
    ## Write domains from multi-domain architectures to markdown tables.
    export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])
    ## Export the domain co-occurrence network.
    export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])
    ## Export the network node attributes.
    export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])
#-----------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    # Expect exactly one CLI argument: the valid_pfam file version tag.
    if len(sys.argv) != 2: # the program name and one argument
        # NOTE(review): the message says mpf.yaml but the script reads
        # local.yaml at import time -- confirm which name is correct.
        sys.exit("""Parameters are read from mpf.yaml but must specify
             version for data/valid_pfam_v_%(version)s.tab""")
    version = sys.argv[1]
    master(version)
|
normal
|
{
"blob_id": "2467825d2cb01c86d3ba27562decc12551877af1",
"index": 457,
"step-1": "\"\"\"\n Script: coverage.py\n Identifies domains that only occur in multi-domain proteins. The main\n script is master.\n --------------------\n Felix A Kruger\n momo.sander@ebi.ac.uk\n\"\"\"\n####\n#### import modules.\n####\nimport queryDevice\nimport operator\nimport yaml\nimport time\n####\n#### Load parameters.\n####\nparamFile = open('local.yaml')\nparams = yaml.safe_load(paramFile)\nparamFile.close()\n\n\n\n\n#### Define functions.\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef get_el_targets(params):\n \"\"\"Query the ChEMBL database for (almost) all activities that are subject to the mapping. Does not conver activities expressed in log-conversion eg pIC50 etc. This function works with chembl_15 upwards. Outputs a list of tuples [(tid, target_type, domain_count, assay_count, act_count),...]\n \"\"\"\n data = queryDevice.queryDevice(\"\"\"\n SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)\n FROM assays ass\n JOIN(\n SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc\n FROM target_dictionary td\n JOIN target_components tc\n ON tc.tid = td.tid\n\t\t JOIN component_sequences cs\n\t\t\tON cs.component_id = tc.component_id\n JOIN component_domains cd\n \t\t\tON cd.component_id = cs.component_id\n WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')\n GROUP BY td.tid\n ) as dc\n ON dc.tid = ass.tid\n JOIN activities act\n ON act.assay_id = ass.assay_id\n WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')\n AND ass.relationship_type = 'D'\n AND assay_type IN('B')\n AND act.standard_relation IN('=')\n AND standard_units = 'nM'\n AND standard_value <= %s\n GROUP BY dc.tid ORDER BY COUNT(activity_id)\"\"\" % (int(params['threshold']) * 1000) , params)\n print \"retrieved data for \", len(data), \"tids.\"\n return 
data\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef readfile(path, key_name, val_name):\n \"\"\"Read two columns from a tab-separated file into a dictionary.\n Inputs:\n path -- filepath\n key_name -- name of the column holding the key\n val_name -- name of the column holding the value\n \"\"\"\n infile = open(path, 'r')\n lines = infile.readlines()\n infile.close()\n lkp = {}\n els = lines[0].rstrip().split('\\t')\n for i, el in enumerate(els):\n if el == key_name:\n key_idx = i\n if el == val_name:\n val_idx = i\n for line in lines[1:]:\n elements = line.rstrip().split('\\t')\n lkp[elements[key_idx]] = elements[val_idx]\n return lkp\n\n#-----------------------------------------------------------------------------------------------------------------------\n\ndef get_archs(el_targets, pfam_lkp):\n \"\"\"Find multi-domain architectures.\n Inputs:\n el_targets -- list of eligible targets\n \"\"\"\n act_lkp = {}\n arch_lkp = {}\n dom_lkp = {}\n for ent in el_targets:\n try:\n doms = pfam_lkp[ent[0]]\n except KeyError:\n print \"no doms in \", ent[0]\n arch = ', '.join(sorted(doms))\n try:\n arch_lkp[arch] += 1\n act_lkp[arch] += ent[4]\n except KeyError:\n arch_lkp[arch] = 1\n act_lkp[arch] = ent[4]\n if len(doms) <= 1:\n continue\n for dom in set(doms):\n try:\n dom_lkp[dom] += 1\n except KeyError:\n dom_lkp[dom] = 1\n return(arch_lkp, dom_lkp, act_lkp)\n\n#-----------------------------------------------------------------------------------------------------------------------\n\ndef get_doms(tids, params):\n \"\"\"Get domains for a list of tids.\n Inputs:\n el_targets -- list of eligible targets\n \"\"\"\n pfam_lkp = {}\n tidstr = \"', '\".join(str(t) for t in tids)\n data = queryDevice.queryDevice(\"\"\"\n SELECT tid, domain_name\n FROM target_components tc\n\t JOIN component_domains cd\n\t ON cd.component_id = tc.component_id\n JOIN domains d\n\t ON d.domain_id = cd.domain_id\n 
WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'\"\"\" %tidstr, params)\n for ent in data:\n tid = ent[0]\n dom = ent[1]\n try:\n pfam_lkp[tid].append(dom)\n except KeyError:\n pfam_lkp[tid] = [dom]\n return pfam_lkp\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef count_valid(lkp, valid_doms):\n \"\"\"Get count of architectures and activities covered by the mapping.\n \"\"\"\n valz = []\n for arch in lkp.keys():\n valid = False\n doms = arch.split(', ')\n for dom in doms:\n if dom in valid_doms:\n valid = True\n break\n valz.append((lkp[arch], valid))\n valid = sum([x[0] for x in valz if x[1]])\n allz = sum([x[0] for x in valz])\n valid_archs = len([x[0] for x in valz if x[1]])\n all_archs = len(sum([x[0] for x in valz]))\n out = open('data/log.tab', 'a')\n timestamp = time.strftime('%d %B %Y %T', time.gmtime())\n comment = \"only binding assays\"\n release = params['release']\n threshold = params['threshold']\n out.write(\"%(valid)s\\t%(allz)s\\t%(release)s\\t%(threshold)s\\t%(comment)s\\t%(timestamp)s\\t%(valid_archs)s\\t%(all_archs)s\\n\"\n % locals())\n out.close()\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef export_archs(arch_lkp, valid_doms, path):\n '''Write out multi-domain architectures in markdown tables.\n Inputs:\n arch_lkp -- dictionary of multi-domain architectures.\n '''\n sorted_archs = sorted(arch_lkp.iteritems(), key=operator.itemgetter(1), reverse = True)\n out = open('%s.md' % path ,'w')\n out.write('|architecture|count|mapped|\\n')\n out.write('|:-----------|:---------|-----:|\\n')\n for arch in sorted_archs:\n doms = str(arch[0]).split(', ')\n if len(doms) <= 1:\n continue\n mapped = ', '.join([x for x in doms if x in valid_doms])\n if len(mapped) == 0:\n mapped = False\n out.write(\"|%s|%s|%s|\\n\"%(arch[0], arch[1], 
mapped))\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef export_network(arch_lkp, valid_doms, path):\n '''Write out network file.\n Inputs:\n arch_lkp -- dictionary of multi-domain architectures.\n '''\n lkp = {}\n for arch in arch_lkp.keys():\n doms = arch.split(', ')\n if len(doms) <= 1:\n continue\n count = arch_lkp[arch]\n if type(doms) is str:\n continue\n for i in range(len(doms)-1):\n for j in range(i+1, len(doms)):\n dom_key = ', '.join(sorted([doms[i],doms[j]]))\n try:\n lkp[dom_key] += count\n except KeyError:\n lkp[dom_key] = count\n out = open('%s.tab' % path ,'w')\n out.write('dom_1\\tdom_2\\tcount\\n')\n for link in lkp.keys():\n doms = str(link).split(', ')\n out.write(\"%s\\t%s\\t%s\\n\"%(doms[0], doms[1], lkp[link]))\n out.close()\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef export_attribs(arch_lkp, valid_doms, path):\n '''Write out network file.\n Inputs:\n arch_lkp -- dictionary of multi-domain architectures.\n '''\n out = open('%s.tab' % path ,'w')\n out.write('dom\\tvalid\\n')\n lkp = {}\n for arch in arch_lkp.keys():\n doms = arch.split(', ')\n if len(doms) <= 1:\n continue\n for dom in doms:\n valid = False\n if dom in valid_doms:\n valid = True\n lkp[dom] = valid\n for it in lkp.items():\n out.write(\"%s\\t%s\\n\"%(it[0], it[1]))\n out.close()\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\ndef export_doms(dom_lkp, valid_doms, path):\n '''Write out identified architectures in markdown tables.\n Inputs:\n dom_lkp -- dictionary of domains occuring in multi-domain architectures.\n '''\n sorted_doms = sorted(dom_lkp.iteritems(), key=operator.itemgetter(1), reverse= True)\n out = open('%s.md' % path ,'w')\n out.write('|domain |count| validated|\\n')\n 
out.write('|:-----------|:-----|-------:|\\n')\n for dom in sorted_doms:\n mapped = False\n count = dom[1]\n dom = str(dom[0])\n if dom in valid_doms:\n mapped = True\n out.write(\"|%s|%s|%s|\\n\"%(dom, count, mapped))\n\n#-----------------------------------------------------------------------------------------------------------------------\n\ndef master(version):\n \"\"\"\n Function: master\n Run through all steps to identify mandatory muli-domain architectures.\n \"\"\"\n # Load the list of validated domains.\n valid_dom_d = readfile('data/valid_pfam_v_%(version)s.tab' % locals(), 'pfam_a', 'pfam_a')\n valid_doms = valid_dom_d.keys()\n ## Load eligible targets.\n el_targets = get_el_targets(params)\n ## Get domains for tids.\n pfam_lkp = get_doms([x[0] for x in el_targets], params)\n ## Add targets with given architecture.\n (arch_lkp, dom_lkp, act_lkp) = get_archs(el_targets, pfam_lkp)\n ## Count covered acrchitectures.\n count_valid(arch_lkp, valid_doms)\n ## Count covered activities.\n count_valid(act_lkp, valid_doms)\n ## Write multi-domain architechtures to markdown tables.\n export_archs(arch_lkp, valid_doms, 'data/multi_dom_archs_%s'% params['release'])\n ## Write domains from multi-domain architechtures to markdown tables.\n export_doms(dom_lkp, valid_doms, 'data/multi_dom_doms_%s'% params['release'])\n ## export network file.\n export_network(arch_lkp, valid_doms, 'data/multi_dom_network_%s'% params['release'])\n ## export network attribute file.\n export_attribs(arch_lkp, valid_doms, 'data/multi_dom_attributes_%s'% params['release'])\n\n\n#-----------------------------------------------------------------------------------------------------------------------\n\n\nif __name__ == '__main__':\n import sys\n\n if len(sys.argv) != 2: # the program name and one argument\n sys.exit(\"\"\"Parameters are read from mpf.yaml but must specify\n\t\t \t version for data/valid_pfam_v_%(version)s.tab\"\"\")\n version = sys.argv[1]\n\n master(version)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'
] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5555)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.debug = True
<|reserved_special_token_0|>
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'
] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5555)
<|reserved_special_token_1|>
#-*- coding:utf-8 -*-
'''
'''
from flask import Flask, jsonify
app = Flask(__name__)
app.debug = True
from datetime import timedelta
from flask import make_response, request, current_app, render_template
from functools import update_wrapper
import json
from subprocess import *
def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    """Decorator factory that adds CORS headers to a Flask view's responses.

    origin            -- allowed origin(s); a string or an iterable of strings.
    methods           -- allowed HTTP methods; defaults to the view's own.
    headers           -- allowed request headers; a string or an iterable.
    max_age           -- preflight cache lifetime (seconds or a timedelta).
    attach_to_all     -- if False, only OPTIONS responses get CORS headers.
    automatic_options -- if True, answer OPTIONS with Flask's default response.

    NOTE(review): uses the Python 2-only name `basestring`; the whole file is
    Python 2 — confirm before porting.
    """
    # Normalise the accepted argument shapes into plain header strings.
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()
    def get_methods():
        # Explicit methods win; otherwise mirror Flask's default OPTIONS reply.
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']
    def decorator(f):
        def wrapped_function(*args, **kwargs):
            # Answer preflight OPTIONS ourselves when requested.
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp
            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            h['Access-Control-Allow-Credentials'] = 'true'
            h['Access-Control-Allow-Headers'] = \
                "Origin, X-Requested-With, Content-Type, Accept, Authorization"
            if headers is not None:
                # Explicit headers override the default allow-list above.
                h['Access-Control-Allow-Headers'] = headers
            return resp
        # Disable Flask's automatic OPTIONS so the wrapper sees the request.
        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
def getBzResult(search_str):
    """Search the bugzilla index and relabel each hit's 'id' as 'bug_id'."""
    hits = get_search_res("bugzilla", "text", search_str)
    for hit in hits:
        hit['bug_id'] = hit.pop('id')
    return hits
def getIkbResult(search_str):
    """Search the ikb index and relabel each hit's 'id' as 'kb_id'."""
    hits = get_search_res("ikb", "kb", search_str)
    for hit in hits:
        hit['kb_id'] = hit.pop('id')
    return hits
def get_search_res(index, doc_type, query):
    """Run a regexp query against an Elasticsearch index via curl.

    Inputs:
    index    -- Elasticsearch index name (e.g. 'bugzilla', 'ikb').
    doc_type -- document type within that index.
    query    -- substring matched against the 'text' field via a regexp.

    Returns a list of {'id': ..., 'summary': ...} dicts, one per hit.
    """
    # ES regexp queries are anchored, so wrap the user string in '.*'.
    search_dsl = '{"query":{"regexp":{"text":\".*%s.*\"}}}' %(query)
    es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)
    # NOTE(review): 'string-escape' is a Python 2-only codec, and .lower()
    # lowercases the whole DSL, not just the query -- presumably because the
    # indexed text is analyzed lowercase; confirm before porting.
    child = Popen(["curl", es_url, "-d", str(search_dsl).lower().encode('string-escape')], stdout=PIPE)
    json_res = child.communicate(None)[0]
    jres = json.loads(json_res)
    ans_list = []
    for item in jres['hits']['hits']:
        ans_list.append({'id': item['_id'], 'summary': item['_source']['summary']})
    return ans_list
@app.route("/regexSearch")
@crossdomain(origin='*')
def regexSearch():
    """Search every regex in the request's JSON 'data' list against both the
    bugzilla and ikb indices and render the combined results page."""
    para = request.args
    data = json.loads(para.get('data', '').strip())
    results = list()
    for regexItem in data:
        results.append([regexItem, getBzResult(regexItem), getIkbResult(regexItem)])
    # Render exactly once: the previous version built an unused 'res' dict
    # and rendered the same template a second, discarded time.
    return render_template('search_result.html', results=results)
@app.route("/DefaultError")
@crossdomain(origin='*')
def defaultError():
    """Fallback error route: render the static stop-sign page."""
    return render_template('stop_sign.html')
if __name__ == "__main__":
    # Listen on all interfaces; 5555 is this service's fixed port.
    app.run(host='0.0.0.0', port=5555)
|
flexible
|
{
"blob_id": "70c78021a2544ea372545b037ed55298c26391d1",
"index": 1182,
"step-1": "<mask token>\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n 
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n",
"step-4": "<mask token>\napp = Flask(__name__)\napp.debug = True\n<mask token>\n\n\ndef crossdomain(origin=None, methods=None, headers=None, max_age=21600,\n attach_to_all=True, automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'\n ] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\n\ndef getBzResult(search_str):\n ans_list = get_search_res('bugzilla', 'text', search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n return ans_list\n\n\ndef getIkbResult(search_str):\n ans_list = get_search_res('ikb', 'kb', search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\".*%s.*\"}}}' % query\n es_url = 
'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (\n index, doc_type)\n child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(\n 'string-escape')], stdout=PIPE)\n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {}\n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n return ans_list\n\n\n@app.route('/regexSearch')\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results=results)\n return render_template('search_result.html', results=results)\n\n\n@app.route('/DefaultError')\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n",
"step-5": "#-*- coding:utf-8 -*-\n'''\n'''\nfrom flask import Flask, jsonify\napp = Flask(__name__)\napp.debug = True\nfrom datetime import timedelta\nfrom flask import make_response, request, current_app, render_template\nfrom functools import update_wrapper\nimport json\n\nfrom subprocess import * \n\ndef crossdomain(origin=None, methods=None, headers=None,\n max_age=21600, attach_to_all=True,\n automatic_options=True):\n if methods is not None:\n methods = ', '.join(sorted(x.upper() for x in methods))\n if headers is not None and not isinstance(headers, basestring):\n headers = ', '.join(x.upper() for x in headers)\n if not isinstance(origin, basestring):\n origin = ', '.join(origin)\n if isinstance(max_age, timedelta):\n max_age = max_age.total_seconds()\n\n def get_methods():\n if methods is not None:\n return methods\n\n options_resp = current_app.make_default_options_response()\n return options_resp.headers['allow']\n\n def decorator(f):\n def wrapped_function(*args, **kwargs):\n if automatic_options and request.method == 'OPTIONS':\n resp = current_app.make_default_options_response()\n else:\n resp = make_response(f(*args, **kwargs))\n if not attach_to_all and request.method != 'OPTIONS':\n return resp\n\n h = resp.headers\n h['Access-Control-Allow-Origin'] = origin\n h['Access-Control-Allow-Methods'] = get_methods()\n h['Access-Control-Max-Age'] = str(max_age)\n h['Access-Control-Allow-Credentials'] = 'true'\n h['Access-Control-Allow-Headers'] = \\\n \"Origin, X-Requested-With, Content-Type, Accept, Authorization\"\n if headers is not None:\n h['Access-Control-Allow-Headers'] = headers\n return resp\n\n f.provide_automatic_options = False\n return update_wrapper(wrapped_function, f)\n return decorator\n\ndef getBzResult(search_str):\n ans_list = get_search_res(\"bugzilla\", \"text\", search_str)\n for i in ans_list:\n i['bug_id'] = i.pop('id')\n #raise Exception('xyz')\n return ans_list\n\ndef getIkbResult(search_str):\n ans_list = get_search_res(\"ikb\", 
\"kb\", search_str)\n for i in ans_list:\n i['kb_id'] = i.pop('id')\n return ans_list\n\ndef get_search_res(index, doc_type, query):\n ans = {}\n search_dsl = '{\"query\":{\"regexp\":{\"text\":\\\".*%s.*\\\"}}}' %(query)\n es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)\n child = Popen([\"curl\", es_url, \"-d\", str(search_dsl).lower().encode('string-escape')], stdout=PIPE) \n json_res = child.communicate(None)[0]\n jres = json.loads(json_res)\n ans_list = []\n for item in jres['hits']['hits']:\n cur = {} \n cur['id'] = item['_id']\n cur['summary'] = item['_source']['summary']\n ans_list.append(cur)\n #sorted to get the latest item\n #newlist = list(reversed(sorted(ans_list, key=lambda k: k['id'])))\n \n return ans_list\n\n@app.route(\"/regexSearch\")\n@crossdomain(origin='*')\ndef regexSearch():\n res = dict()\n para = request.args\n data = para.get('data', '').strip()\n data = json.loads(data)\n results = list()\n for regexItem in data:\n bzResult = getBzResult(regexItem)\n ikbResult = getIkbResult(regexItem)\n results.append([regexItem, bzResult, ikbResult])\n #raise Exception('xyz')\n res['res'] = 'success'\n res['data'] = render_template('search_result.html', results = results)\n\n return render_template('search_result.html', results = results)\n\n@app.route(\"/DefaultError\")\n@crossdomain(origin='*')\ndef defaultError():\n return render_template('stop_sign.html')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5555)\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
import heapq
from util import edit_distance
def autocomplete(suggest_tree, bktree, prefix, count=5):
    """Suggest top completions for a prefix given a SuggestTree and BKTree.

    Completions for a given prefix are weighted primarily by their weight in
    the suggest tree, and secondarily by their proximity score (shorter
    completions rank higher). If the prefix has no completions in the
    suggest tree, fall back to the *count* nearest fuzzy matches from the
    BK-tree by Levenshtein distance.
    """
    completion_weights = suggest_tree.completion_weights(prefix)
    if completion_weights:
        # Named functions instead of lambdas bound to names (PEP 8 E731);
        # iterating the dict directly avoids the redundant .keys() call.
        def selection_criteria(completion):
            return (completion_weights[completion],
                    completion_proximity_score(prefix, completion))

        return heapq.nlargest(count, completion_weights,
                              key=selection_criteria)
    else:
        matches = bktree.search(prefix)

        def proximity(completion):
            return edit_distance(prefix, completion)

        return heapq.nsmallest(count, matches, key=proximity)
def completion_proximity_score(prefix, completion):
    """Calculate a score based on suffix length where a shorter length always
    yields a higher score."""
    # An exact match outranks every other candidate; otherwise score is
    # the inverse of the completion's total length, so longer completions
    # (longer suffixes) always score lower.
    if prefix != completion:
        return 1.0 / float(len(completion))
    return float("inf")
|
normal
|
{
"blob_id": "24891cdefcd061f04e7b7768b1bde4e32b78adcc",
"index": 8690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-3": "<mask token>\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-4": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-5": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(\n prefix, completion)\n selection_criteria = lambda completion: (\n weight(completion), proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n \ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sb.set(style='ticks')
<|reserved_special_token_0|>
os.chdir(
'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'
)
<|reserved_special_token_0|>
lon_bnds[:, 0] += 360.0
<|reserved_special_token_0|>
print(df)
<|reserved_special_token_0|>
plt.title('Glacial minus Late Holocene change in $\\delta^{15}$N$_{org}$',
family='sans-serif', fontsize=12)
<|reserved_special_token_0|>
proj.drawcoastlines(linewidth=0.5, color='k')
proj.fillcontinents(color='grey')
<|reserved_special_token_0|>
proj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=
[True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,
fontsize=12, family='sans-serif')
proj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=
[True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12
)
<|reserved_special_token_0|>
plt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),
ncol=2, frameon=False, scatterpoints=1)
plt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)
<|reserved_special_token_0|>
cbar.ax.set_ylabel(u'$\\delta^{15}$N ‰ vs air', fontsize=12, family=
'sans-serif')
plt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)
fig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')
fig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sb.set(style='ticks')
<|reserved_special_token_0|>
os.chdir(
'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'
)
data = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')
mk3lpi_no3 = data.variables['no3'][...]
mk3lpi_n15 = data.variables['no3_15'][...]
mk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)
mk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)
mk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000
mk3lpi_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')
mk3lpidust_no3 = data.variables['no3'][...]
mk3lpidust_n15 = data.variables['no3_15'][...]
mk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)
mk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)
mk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1
) * 1000
mk3lpidust_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')
mk3llgm_no3 = data.variables['no3'][...]
mk3llgm_n15 = data.variables['no3_15'][...]
mk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)
mk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)
mk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000
mk3llgm_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')
mk3llgmdust_no3 = data.variables['no3'][...]
mk3llgmdust_n15 = data.variables['no3_15'][...]
mk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)
mk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)
mk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 - mk3llgmdust_n15) - 1
) * 1000
mk3llgmdust_d15org = data.variables['sed_d15n'][...]
grid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')
dvts = grid.variables['dvts'][...]
dats = grid.variables['dats'][...]
lats = grid.variables['latts'][...]
lat_bnds = grid.variables['latts_bnds'][...]
lons = grid.variables['lonts'][...]
lon_bnds = grid.variables['lonts_bnds'][...]
lon_bnds[:, 0] += 360.0
deps = grid.variables['zts'][...]
dep_bnds = grid.variables['zts_bnds'][...]
zts = dvts / dats
deps3d = np.cumsum(zts, axis=0)
mk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)
mk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)
mk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)
mk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)
mk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)
mk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,
weights=zts)
mk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)
mk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,
weights=zts)
df = pd.read_csv('Supplementary Data 1.csv')
print(df)
records = df[~np.isnan(df['d15n_LateH'])]
bulk_records = records[records['type'] == 'bulk']
bound_records = records[records['type'] != 'bulk']
lat_labs = ['80$^{\\circ}$S', '60$^{\\circ}$S', '40$^{\\circ}$S',
'20$^{\\circ}$S', '0$^{\\circ}$', '20$^{\\circ}$N', '40$^{\\circ}$N',
'60$^{\\circ}$N', '80$^{\\circ}$N']
lon_labs = ['0$^{\\circ}$E', '50$^{\\circ}$E', '100$^{\\circ}$E',
'150$^{\\circ}$E', '200$^{\\circ}$E', '250$^{\\circ}$E',
'300$^{\\circ}$E', '350$^{\\circ}$E']
domain = [-45, 0, 45, 355]
domain_draw = [-40, 0, 40, 360]
dlat = 20
dlon = 60
xx, yy = np.meshgrid(lons, lats)
levs = np.arange(-5, 5.1, 0.5)
conts = [-1, 1]
fig = plt.figure(facecolor='w', figsize=(10, 4))
plt.title('Glacial minus Late Holocene change in $\\delta^{15}$N$_{org}$',
family='sans-serif', fontsize=12)
proj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[
1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')
lonproj, latproj = proj(xx, yy)
bulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[
'lat']))
bound_x, bound_y = proj(np.array(bound_records['lon']), np.array(
bound_records['lat']))
proj.drawcoastlines(linewidth=0.5, color='k')
proj.fillcontinents(color='grey')
p3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -
mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,
vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')
c3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -
mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths
=0.5, linestyle='-')
s31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -
bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.
ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=
1.0, zorder=3)
s32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -
bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.
ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=
1.0, zorder=2)
proj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=
[True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,
fontsize=12, family='sans-serif')
proj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=
[True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12
)
<|reserved_special_token_0|>
elements = [Line2D([0], [0], marker='o', markerfacecolor='w',
markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=
'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',
markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=
'Bound organic N')]
plt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),
ncol=2, frameon=False, scatterpoints=1)
plt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)
cbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])
cbar = plt.colorbar(p3, cax=cbax, orientation='vertical')
cbar.ax.set_ylabel(u'$\\delta^{15}$N ‰ vs air', fontsize=12, family=
'sans-serif')
plt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)
fig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')
fig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import numpy as np
import netCDF4 as nc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cmocean.cm as cmo
import seaborn as sb
sb.set(style='ticks')
import mpl_toolkits.basemap as bm
import pandas as pd
os.chdir(
'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'
)
data = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')
mk3lpi_no3 = data.variables['no3'][...]
mk3lpi_n15 = data.variables['no3_15'][...]
mk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)
mk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)
mk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000
mk3lpi_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')
mk3lpidust_no3 = data.variables['no3'][...]
mk3lpidust_n15 = data.variables['no3_15'][...]
mk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)
mk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)
mk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1
) * 1000
mk3lpidust_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')
mk3llgm_no3 = data.variables['no3'][...]
mk3llgm_n15 = data.variables['no3_15'][...]
mk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)
mk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)
mk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000
mk3llgm_d15org = data.variables['sed_d15n'][...]
data = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')
mk3llgmdust_no3 = data.variables['no3'][...]
mk3llgmdust_n15 = data.variables['no3_15'][...]
mk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)
mk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)
mk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 - mk3llgmdust_n15) - 1
) * 1000
mk3llgmdust_d15org = data.variables['sed_d15n'][...]
grid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')
dvts = grid.variables['dvts'][...]
dats = grid.variables['dats'][...]
lats = grid.variables['latts'][...]
lat_bnds = grid.variables['latts_bnds'][...]
lons = grid.variables['lonts'][...]
lon_bnds = grid.variables['lonts_bnds'][...]
lon_bnds[:, 0] += 360.0
deps = grid.variables['zts'][...]
dep_bnds = grid.variables['zts_bnds'][...]
zts = dvts / dats
deps3d = np.cumsum(zts, axis=0)
mk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)
mk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)
mk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)
mk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)
mk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)
mk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,
weights=zts)
mk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)
mk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,
weights=zts)
df = pd.read_csv('Supplementary Data 1.csv')
print(df)
records = df[~np.isnan(df['d15n_LateH'])]
bulk_records = records[records['type'] == 'bulk']
bound_records = records[records['type'] != 'bulk']
lat_labs = ['80$^{\\circ}$S', '60$^{\\circ}$S', '40$^{\\circ}$S',
'20$^{\\circ}$S', '0$^{\\circ}$', '20$^{\\circ}$N', '40$^{\\circ}$N',
'60$^{\\circ}$N', '80$^{\\circ}$N']
# NOTE(review): this excerpt continues a longer plotting script -- `lons`,
# `lats`, `mk3lpi_d15org_corz`, `mk3lpidust_d15org_corz`, `bulk_records`,
# `bound_records`, `np`, `plt`, `bm` (basemap) and `cmo` (cmocean colormaps)
# are all defined earlier in the file.
# Longitude tick labels (defined but not referenced below).
lon_labs = ['0$^{\\circ}$E', '50$^{\\circ}$E', '100$^{\\circ}$E',
    '150$^{\\circ}$E', '200$^{\\circ}$E', '250$^{\\circ}$E',
    '300$^{\\circ}$E', '350$^{\\circ}$E']
# Map extent as [lat_min, lon_min, lat_max, lon_max] (degrees) and the
# slightly smaller extent used when placing gridline labels.
domain = [-45, 0, 45, 355]
domain_draw = [-40, 0, 40, 360]
dlat = 20  # spacing of parallels (degrees latitude)
dlon = 60  # spacing of meridians (degrees longitude)
xx, yy = np.meshgrid(lons, lats)
# Filled-contour levels of the d15N change; `conts` marks the -1/+1
# outlines drawn on top of the colour fill.
levs = np.arange(-5, 5.1, 0.5)
conts = [-1, 1]
fig = plt.figure(facecolor='w', figsize=(10, 4))
plt.title('Glacial minus Late Holocene change in $\\delta^{15}$N$_{org}$',
    family='sans-serif', fontsize=12)
# Mercator projection restricted to `domain`; project both the model grid
# and the sediment-core site coordinates into map space.
proj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[
    1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')
lonproj, latproj = proj(xx, yy)
bulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[
    'lat']))
bound_x, bound_y = proj(np.array(bound_records['lon']), np.array(
    bound_records['lat']))
proj.drawcoastlines(linewidth=0.5, color='k')
proj.fillcontinents(color='grey')
# Simulated change in sediment d15N (dust-fertilised minus control run).
p3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -
    mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,
    vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')
c3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -
    mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths
    =0.5, linestyle='-')
# Sediment-core observations on the same colour scale as the model field:
# stars = bound organic N records, circles = bulk organic N records,
# coloured by the LGM-minus-Late-Holocene d15N difference.
s31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -
    bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.
    ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=
    1.0, zorder=3)
s32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -
    bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.
    ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=
    1.0, zorder=2)
# Gridline labels only -- linewidth=0 suppresses the lines themselves.
proj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=
    [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,
    fontsize=12, family='sans-serif')
proj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=
    [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12
    )
# Proxy artists for the legend (one entry per record type).
from matplotlib.lines import Line2D
elements = [Line2D([0], [0], marker='o', markerfacecolor='w',
    markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=
    'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',
    markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=
    'Bound organic N')]
plt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),
    ncol=2, frameon=False, scatterpoints=1)
plt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)
# Dedicated colourbar axis to the right of the map.
cbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])
cbar = plt.colorbar(p3, cax=cbax, orientation='vertical')
cbar.ax.set_ylabel(u'$\\delta^{15}$N ‰ vs air', fontsize=12, family=
    'sans-serif')
# NOTE(review): manual=True makes clabel wait for interactive mouse clicks
# to place the contour labels, so a non-interactive backend will block here
# -- confirm this is intended.
plt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)
fig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')
fig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 10:17:32 2018
@author: pearseb

Figure 4: maps the glacial-minus-Late-Holocene change in sedimentary
d15N of organic matter, comparing Mk3L model experiments (control vs
iron/dust-fertilised) against a compilation of sediment-core records.
"""
#%% imports
import os
import numpy as np
import netCDF4 as nc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cmocean.cm as cmo
import seaborn as sb
sb.set(style='ticks')
import mpl_toolkits.basemap as bm
import pandas as pd
# move to working directory
os.chdir("C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication")
#%% get data
# Pre-industrial control run: mask NO3 and 15NO3 where NO3 is negligible
# (< 0.1), then convert the isotope ratio to d15N (permil vs air).
data = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')
mk3lpi_no3 = data.variables['no3'][...]
mk3lpi_n15 = data.variables['no3_15'][...]
mk3lpi_no3 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_no3)
mk3lpi_n15 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_n15)
mk3lpi_d15n = (mk3lpi_n15/(mk3lpi_no3-mk3lpi_n15)-1)*1000
mk3lpi_d15org = data.variables['sed_d15n'][...]
# Pre-industrial run with iron input increased to 2500%.
data = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')
mk3lpidust_no3 = data.variables['no3'][...]
mk3lpidust_n15 = data.variables['no3_15'][...]
mk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_no3)
mk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_n15)
mk3lpidust_d15n = (mk3lpidust_n15/(mk3lpidust_no3-mk3lpidust_n15)-1)*1000
mk3lpidust_d15org = data.variables['sed_d15n'][...]
# Last Glacial Maximum control run.
data = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')
mk3llgm_no3 = data.variables['no3'][...]
mk3llgm_n15 = data.variables['no3_15'][...]
mk3llgm_no3 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_no3)
mk3llgm_n15 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_n15)
mk3llgm_d15n = (mk3llgm_n15/(mk3llgm_no3-mk3llgm_n15)-1)*1000
mk3llgm_d15org = data.variables['sed_d15n'][...]
# LGM run with iron input increased to 2500%.
data = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')
mk3llgmdust_no3 = data.variables['no3'][...]
mk3llgmdust_n15 = data.variables['no3_15'][...]
mk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_no3)
mk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_n15)
mk3llgmdust_d15n = (mk3llgmdust_n15/(mk3llgmdust_no3-mk3llgmdust_n15)-1)*1000
mk3llgmdust_d15org = data.variables['sed_d15n'][...]
# Model grid geometry: cell volumes (dvts), areas (dats) and coordinates.
grid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')
dvts = grid.variables['dvts'][...]
dats = grid.variables['dats'][...]
lats = grid.variables['latts'][...]
lat_bnds = grid.variables['latts_bnds'][...]
lons = grid.variables['lonts'][...]
lon_bnds = grid.variables['lonts_bnds'][...]
lon_bnds[:,0] += 360.0
deps = grid.variables['zts'][...]
dep_bnds = grid.variables['zts_bnds'][...]
zts = dvts/dats  # layer thickness = cell volume / cell area
#%% apply depth correction to d15N of organic matter (see Robinson et al., 2012, Paleoceanography)
deps3d = np.cumsum(zts,axis=0)  # cumulative depth of each layer
# correction: +0.9 permil per kilometre of water depth
mk3lpi_d15org_cor = mk3lpi_d15org + 0.9*(deps3d*1e-3)
mk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9*(deps3d*1e-3)
mk3llgm_d15org_cor = mk3llgm_d15org + 0.9*(deps3d*1e-3)
mk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9*(deps3d*1e-3)
# average over all depths (thickness-weighted)
mk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)
mk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0, weights=zts)
mk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)
mk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0, weights=zts)
#%% collect prepared compilation of sedimentary d15N records
df = pd.read_csv('Supplementary Data 1.csv')
print(df)
# keep only cores with a Late Holocene value; split by measurement type
records = df[~np.isnan(df['d15n_LateH'])]
bulk_records = records[records['type']=='bulk']
bound_records = records[records['type']!='bulk']
#%% map layout: tick labels, plotting domain and gridline spacing
lat_labs = ['80$^{\circ}$S', '60$^{\circ}$S', '40$^{\circ}$S', '20$^{\circ}$S', '0$^{\circ}$', \
            '20$^{\circ}$N', '40$^{\circ}$N', '60$^{\circ}$N', '80$^{\circ}$N']
lon_labs = ['0$^{\circ}$E', '50$^{\circ}$E', '100$^{\circ}$E', '150$^{\circ}$E', '200$^{\circ}$E', \
            '250$^{\circ}$E', '300$^{\circ}$E', '350$^{\circ}$E']
domain = [-45,0,45,355]
domain_draw = [-40,0,40,360]
dlat=20
dlon=60
xx,yy = np.meshgrid(lons, lats)
#%% draw figure 4
levs = np.arange(-5,5.1,0.5)
conts = [-1,1]
fig = plt.figure(facecolor='w', figsize=(10,4))
plt.title('Glacial minus Late Holocene change in $\delta^{15}$N$_{org}$', family='sans-serif', fontsize=12)
proj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')
lonproj, latproj = proj(xx, yy)
bulk_x, bulk_y = proj(np.array(bulk_records['lon']),np.array(bulk_records['lat']))
bound_x, bound_y = proj(np.array(bound_records['lon']),np.array(bound_records['lat']))
proj.drawcoastlines(linewidth=0.5, color='k')
proj.fillcontinents(color='grey')
# modelled change in sediment d15N (dust-fertilised minus control run)
p3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, \
                  levels=levs, vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')
c3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths=0.5, linestyle='-')
# core records: stars = bound organic N, circles = bulk organic N,
# coloured by the LGM-minus-Late-Holocene d15N difference
s31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM']-bound_records['d15n_LateH'], \
                  marker='*', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \
                  alpha=0.75, edgecolor='k', linewidths=1.0, zorder=3)
s32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM']-bulk_records['d15n_LateH'], \
                  marker='o', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \
                  alpha=0.75, edgecolor='k', linewidths=1.0, zorder=2)
# gridline labels only (linewidth=0 suppresses the lines themselves)
proj.drawparallels(range(domain_draw[0],domain_draw[2]+1,dlat), labels=[True,False,False,False], color=(.3,.3,.3), linewidth=0, fontsize=12, family='sans-serif')
proj.drawmeridians(range(domain_draw[1],domain_draw[3]+1,dlon), labels=[True,False,False,True], color=(.3,.3,.3), linewidth=0, fontsize=12)
# proxy artists for the legend (one entry per record type)
from matplotlib.lines import Line2D
elements = [Line2D([0], [0], marker='o', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label='Bulk organic N'),\
            Line2D([0], [0], marker='*', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label='Bound organic N')]
plt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5,-0.25), ncol=2, frameon=False, scatterpoints=1)
plt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)
# dedicated colourbar axis to the right of the map
cbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])
cbar = plt.colorbar(p3, cax=cbax, orientation='vertical')
cbar.ax.set_ylabel(u'$\delta^{15}$N ‰ vs air', fontsize=12, family='sans-serif')
# NOTE(review): manual=True requires interactive mouse clicks to place the
# contour labels -- the script will block under a non-interactive backend.
plt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)
fig.savefig('figures_for_publication/fig4.pdf',dpi=300,bbox_inches='tight')
fig.savefig('figures_for_publication/fig4.png',dpi=300,bbox_inches='tight')
|
flexible
|
{
"blob_id": "635b02e03578d44f13530bd57ab1a99987d4909d",
"index": 5987,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsb.set(style='ticks')\n<mask token>\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\n<mask token>\nlon_bnds[:, 0] += 360.0\n<mask token>\nprint(df)\n<mask token>\nplt.title('Glacial minus Late Holocene change in $\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\n<mask token>\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\n<mask token>\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\n<mask token>\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\n<mask token>\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-3": "<mask token>\nsb.set(style='ticks')\n<mask token>\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1\n ) * 1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 - mk3llgmdust_n15) - 1\n ) * 1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = 
grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:, 0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts / dats\ndeps3d = np.cumsum(zts, axis=0)\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,\n weights=zts)\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,\n weights=zts)\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = records[records['type'] == 'bulk']\nbound_records = records[records['type'] != 'bulk']\nlat_labs = ['80$^{\\\\circ}$S', '60$^{\\\\circ}$S', '40$^{\\\\circ}$S',\n '20$^{\\\\circ}$S', '0$^{\\\\circ}$', '20$^{\\\\circ}$N', '40$^{\\\\circ}$N',\n '60$^{\\\\circ}$N', '80$^{\\\\circ}$N']\nlon_labs = ['0$^{\\\\circ}$E', '50$^{\\\\circ}$E', '100$^{\\\\circ}$E',\n '150$^{\\\\circ}$E', '200$^{\\\\circ}$E', '250$^{\\\\circ}$E',\n '300$^{\\\\circ}$E', '350$^{\\\\circ}$E']\ndomain = [-45, 0, 45, 355]\ndomain_draw = [-40, 0, 40, 360]\ndlat = 20\ndlon = 60\nxx, yy = np.meshgrid(lons, lats)\nlevs = np.arange(-5, 5.1, 0.5)\nconts = [-1, 1]\nfig = plt.figure(facecolor='w', figsize=(10, 4))\nplt.title('Glacial minus Late Holocene change in $\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[\n 1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, 
latproj = proj(xx, yy)\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[\n 'lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']), np.array(\n bound_records['lat']))\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,\n vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths\n =0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -\n bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -\n bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=2)\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\n<mask token>\nelements = [Line2D([0], [0], marker='o', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=\n 'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=\n 'Bound organic N')]\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = 
plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cmocean.cm as cmo\nimport seaborn as sb\nsb.set(style='ticks')\nimport mpl_toolkits.basemap as bm\nimport pandas as pd\nos.chdir(\n 'C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication'\n )\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3 < 0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15 / (mk3lpi_no3 - mk3lpi_n15) - 1) * 1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3 < 0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15 / (mk3lpidust_no3 - mk3lpidust_n15) - 1\n ) * 1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3 < 0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15 / (mk3llgm_no3 - mk3llgm_n15) - 1) * 1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 = np.ma.masked_where(mk3llgmdust_no3 < 0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15 / (mk3llgmdust_no3 
- mk3llgmdust_n15) - 1\n ) * 1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:, 0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts / dats\ndeps3d = np.cumsum(zts, axis=0)\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9 * (deps3d * 0.001)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9 * (deps3d * 0.001)\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9 * (deps3d * 0.001)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9 * (deps3d * 0.001)\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0,\n weights=zts)\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0,\n weights=zts)\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = records[records['type'] == 'bulk']\nbound_records = records[records['type'] != 'bulk']\nlat_labs = ['80$^{\\\\circ}$S', '60$^{\\\\circ}$S', '40$^{\\\\circ}$S',\n '20$^{\\\\circ}$S', '0$^{\\\\circ}$', '20$^{\\\\circ}$N', '40$^{\\\\circ}$N',\n '60$^{\\\\circ}$N', '80$^{\\\\circ}$N']\nlon_labs = ['0$^{\\\\circ}$E', '50$^{\\\\circ}$E', '100$^{\\\\circ}$E',\n '150$^{\\\\circ}$E', '200$^{\\\\circ}$E', '250$^{\\\\circ}$E',\n '300$^{\\\\circ}$E', '350$^{\\\\circ}$E']\ndomain = [-45, 0, 45, 355]\ndomain_draw = [-40, 0, 40, 360]\ndlat = 20\ndlon = 60\nxx, yy = np.meshgrid(lons, lats)\nlevs = np.arange(-5, 5.1, 0.5)\nconts = [-1, 1]\nfig = plt.figure(facecolor='w', figsize=(10, 4))\nplt.title('Glacial minus Late Holocene change in 
$\\\\delta^{15}$N$_{org}$',\n family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[\n 1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, latproj = proj(xx, yy)\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']), np.array(bulk_records[\n 'lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']), np.array(\n bound_records['lat']))\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, levels=levs,\n vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz -\n mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths\n =0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM'] -\n bound_records['d15n_LateH'], marker='*', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM'] -\n bulk_records['d15n_LateH'], marker='o', vmin=np.ma.min(levs), vmax=np.\n ma.max(levs), cmap=cmo.balance, alpha=0.75, edgecolor='k', linewidths=\n 1.0, zorder=2)\nproj.drawparallels(range(domain_draw[0], domain_draw[2] + 1, dlat), labels=\n [True, False, False, False], color=(0.3, 0.3, 0.3), linewidth=0,\n fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1], domain_draw[3] + 1, dlon), labels=\n [True, False, False, True], color=(0.3, 0.3, 0.3), linewidth=0, fontsize=12\n )\nfrom matplotlib.lines import Line2D\nelements = [Line2D([0], [0], marker='o', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label=\n 'Bulk organic N'), Line2D([0], [0], marker='*', markerfacecolor='w',\n markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label=\n 'Bound organic 
N')]\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5, -0.25),\n ncol=2, frameon=False, scatterpoints=1)\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\\\delta^{15}$N ‰ vs air', fontsize=12, family=\n 'sans-serif')\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\nfig.savefig('figures_for_publication/fig4.pdf', dpi=300, bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png', dpi=300, bbox_inches='tight')\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 16 10:17:32 2018\n\n@author: pearseb\n\"\"\"\n\n#%% imporst\n \nimport os\nimport numpy as np\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport cmocean.cm as cmo\nimport seaborn as sb\nsb.set(style='ticks')\nimport mpl_toolkits.basemap as bm\nimport pandas as pd \n\n\n# move to working directory\nos.chdir(\"C://Users/pearseb/Dropbox/PhD/My articles/nitrogen-carbon cycles/data_for_publication\")\n\n\n#%% get data\n\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-mod_default_N15active.nc', 'r')\nmk3lpi_no3 = data.variables['no3'][...]\nmk3lpi_n15 = data.variables['no3_15'][...]\nmk3lpi_no3 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_no3)\nmk3lpi_n15 = np.ma.masked_where(mk3lpi_no3<0.1, mk3lpi_n15)\nmk3lpi_d15n = (mk3lpi_n15/(mk3lpi_no3-mk3lpi_n15)-1)*1000\nmk3lpi_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Lpi_Fe-2500per_default_N15active.nc', 'r')\nmk3lpidust_no3 = data.variables['no3'][...]\nmk3lpidust_n15 = data.variables['no3_15'][...]\nmk3lpidust_no3 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_no3)\nmk3lpidust_n15 = np.ma.masked_where(mk3lpidust_no3<0.1, mk3lpidust_n15)\nmk3lpidust_d15n = (mk3lpidust_n15/(mk3lpidust_no3-mk3lpidust_n15)-1)*1000\nmk3lpidust_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-mod_default_N15active.nc', 'r')\nmk3llgm_no3 = data.variables['no3'][...]\nmk3llgm_n15 = data.variables['no3_15'][...]\nmk3llgm_no3 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_no3)\nmk3llgm_n15 = np.ma.masked_where(mk3llgm_no3<0.1, mk3llgm_n15)\nmk3llgm_d15n = (mk3llgm_n15/(mk3llgm_no3-mk3llgm_n15)-1)*1000\nmk3llgm_d15org = data.variables['sed_d15n'][...]\n\ndata = nc.Dataset('BGC_Mk3Llgm_Fe-2500per_default_N15active.nc', 'r')\nmk3llgmdust_no3 = data.variables['no3'][...]\nmk3llgmdust_n15 = data.variables['no3_15'][...]\nmk3llgmdust_no3 = np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_no3)\nmk3llgmdust_n15 
= np.ma.masked_where(mk3llgmdust_no3<0.1, mk3llgmdust_n15)\nmk3llgmdust_d15n = (mk3llgmdust_n15/(mk3llgmdust_no3-mk3llgmdust_n15)-1)*1000\nmk3llgmdust_d15org = data.variables['sed_d15n'][...]\n\ngrid = nc.Dataset('grid_spec_mk3l_128_112_21_v2.nc', 'r')\ndvts = grid.variables['dvts'][...]\ndats = grid.variables['dats'][...]\nlats = grid.variables['latts'][...]\nlat_bnds = grid.variables['latts_bnds'][...]\nlons = grid.variables['lonts'][...]\nlon_bnds = grid.variables['lonts_bnds'][...]\nlon_bnds[:,0] += 360.0\ndeps = grid.variables['zts'][...]\ndep_bnds = grid.variables['zts_bnds'][...]\nzts = dvts/dats\n\n\n\n#%% apply depth correction to d15N of organic matter (see Robinson et al., 2012, Paleoceanography)\n\ndeps3d = np.cumsum(zts,axis=0)\n\n# correction\nmk3lpi_d15org_cor = mk3lpi_d15org + 0.9*(deps3d*1e-3)\nmk3lpidust_d15org_cor = mk3lpidust_d15org + 0.9*(deps3d*1e-3)\n\nmk3llgm_d15org_cor = mk3llgm_d15org + 0.9*(deps3d*1e-3)\nmk3llgmdust_d15org_cor = mk3llgmdust_d15org + 0.9*(deps3d*1e-3)\n\n\n# average over all depths\nmk3lpi_d15org_corz = np.ma.average(mk3lpi_d15org_cor, axis=0, weights=zts)\nmk3lpidust_d15org_corz = np.ma.average(mk3lpidust_d15org_cor, axis=0, weights=zts)\n\nmk3llgm_d15org_corz = np.ma.average(mk3llgm_d15org_cor, axis=0, weights=zts)\nmk3llgmdust_d15org_corz = np.ma.average(mk3llgmdust_d15org_cor, axis=0, weights=zts)\n\n\n\n#%% collect prepared compilation of sedimentary d15N records\n\ndf = pd.read_csv('Supplementary Data 1.csv')\nprint(df)\n\nrecords = df[~np.isnan(df['d15n_LateH'])]\nbulk_records = records[records['type']=='bulk']\nbound_records = records[records['type']!='bulk']\n\n\n#%%\n\nlat_labs = ['80$^{\\circ}$S', '60$^{\\circ}$S', '40$^{\\circ}$S', '20$^{\\circ}$S', '0$^{\\circ}$', \\\n '20$^{\\circ}$N', '40$^{\\circ}$N', '60$^{\\circ}$N', '80$^{\\circ}$N']\nlon_labs = ['0$^{\\circ}$E', '50$^{\\circ}$E', '100$^{\\circ}$E', '150$^{\\circ}$E', '200$^{\\circ}$E', \\\n '250$^{\\circ}$E', '300$^{\\circ}$E', 
'350$^{\\circ}$E']\n\ndomain = [-45,0,45,355] \ndomain_draw = [-40,0,40,360]\ndlat=20\ndlon=60\n\nxx,yy = np.meshgrid(lons, lats)\n\n\n#%%\n\nlevs = np.arange(-5,5.1,0.5)\nconts = [-1,1]\n\nfig = plt.figure(facecolor='w', figsize=(10,4))\n\nplt.title('Glacial minus Late Holocene change in $\\delta^{15}$N$_{org}$', family='sans-serif', fontsize=12)\nproj = bm.Basemap(projection='merc', llcrnrlat=domain[0], llcrnrlon=domain[1], urcrnrlat=domain[2], urcrnrlon=domain[3], resolution='c')\nlonproj, latproj = proj(xx, yy)\n\nbulk_x, bulk_y = proj(np.array(bulk_records['lon']),np.array(bulk_records['lat']))\nbound_x, bound_y = proj(np.array(bound_records['lon']),np.array(bound_records['lat']))\n\nproj.drawcoastlines(linewidth=0.5, color='k')\nproj.fillcontinents(color='grey')\np3 = plt.contourf(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, cmap=cmo.balance, corner_mask=False, \\\n levels=levs, vmin=np.ma.min(levs), vmax=np.ma.max(levs), extend='both')\nc3 = plt.contour(lonproj, latproj, mk3lpidust_d15org_corz-mk3lpi_d15org_corz, colors='black', levels=conts, alpha=0.8, linewidths=0.5, linestyle='-')\ns31 = plt.scatter(bound_x, bound_y, s=150, c=bound_records['d15n_LGM']-bound_records['d15n_LateH'], \\\n marker='*', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \\\n alpha=0.75, edgecolor='k', linewidths=1.0, zorder=3)\ns32 = plt.scatter(bulk_x, bulk_y, s=40, c=bulk_records['d15n_LGM']-bulk_records['d15n_LateH'], \\\n marker='o', vmin=np.ma.min(levs), vmax=np.ma.max(levs), cmap=cmo.balance, \\\n alpha=0.75, edgecolor='k', linewidths=1.0, zorder=2)\n\nproj.drawparallels(range(domain_draw[0],domain_draw[2]+1,dlat), labels=[True,False,False,False], color=(.3,.3,.3), linewidth=0, fontsize=12, family='sans-serif')\nproj.drawmeridians(range(domain_draw[1],domain_draw[3]+1,dlon), labels=[True,False,False,True], color=(.3,.3,.3), linewidth=0, fontsize=12)\n\n\nfrom matplotlib.lines import Line2D\nelements = [Line2D([0], [0], marker='o', 
markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=15, label='Bulk organic N'),\\\n Line2D([0], [0], marker='*', markerfacecolor='w', markeredgecolor='k', color='w', linewidth=1.0, markersize=20, label='Bound organic N')]\n\nplt.legend(handles=elements, loc='center', bbox_to_anchor=(0.5,-0.25), ncol=2, frameon=False, scatterpoints=1)\n\nplt.subplots_adjust(bottom=0.1, top=0.95, left=0.075, right=0.85)\ncbax = fig.add_axes([0.88, 0.25, 0.03, 0.55])\ncbar = plt.colorbar(p3, cax=cbax, orientation='vertical')\ncbar.ax.set_ylabel(u'$\\delta^{15}$N ‰ vs air', fontsize=12, family='sans-serif')\n\nplt.clabel(c3, manual=True, fmt='%i', fontsize=10, colors='k', inline=True)\n\n\nfig.savefig('figures_for_publication/fig4.pdf',dpi=300,bbox_inches='tight')\nfig.savefig('figures_for_publication/fig4.png',dpi=300,bbox_inches='tight')\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
import json
from datetime import datetime
from math import ceil, floor
from os.path import abspath, join, pardir
from struct import pack
from .global_settings import (
DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
)
# # # keep in mind: the faster numba optimized helper fct. cannot be used here,
# # # because numpy classes are not being used at this stage yet!
from .helpers import coord2int, inside_polygon, int2coord
# from helpers import coord2int, inside_polygon, int2coord
# from global_settings import (
# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,
# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,
# )
# import sys
# from os.path import dirname
#
# sys.path.insert(0, dirname(__file__))
# from helpers import coord2int, int2coord, inside_polygon
"""
TODO write tests
USE INSTRUCTIONS:
- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases
- unzip and place the combined.json inside this timezonefinder folder
- run this file_converter.py as a script until the compilation of the binary files is completed.
IMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster
and it takes a lot less space, without losing too much accuracy (min accuracy (= at the equator) is still 1cm !)
B = unsigned char (1byte = 8bit Integer)
H = unsigned short (2 byte integer)
I = unsigned 4byte integer
i = signed 4byte integer
Binaries being written:
[POLYGONS:] there are approx. 1k Polygons (evansiroky/timezone-boundary-builder 2017a)
poly_zone_ids: the related zone_id for every polygon ('<H')
poly_coord_amount: the amount of coordinates in every polygon ('<I')
poly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')
poly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)
poly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)
poly_nr2zone_id: the polygon number of the first polygon from every zone('<H')
[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)
hole_poly_ids: the related polygon_nr (=id) for every hole ('<H')
hole_coord_amount: the amount of coordinates in every hole ('<H')
hole_adr2data: address in hole_data.bin where data for every hole starts ('<I')
hole_data: coordinates for every hole (multiple times '<i')
[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.
-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts
shortcut here means storing for every cell in a grid of the world map which polygons are located in that cell
they can therefore be used to drastically reduce the amount of polygons which need to be checked in order to
decide which timezone a point is located in.
the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id
this is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)
shortcuts_entry_amount: the amount of polygons for every shortcut ('<H')
shortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')
shortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')
shortcuts_unique_id: the zone id if only polygons from one zone are present,
a high number (with no corresponding zone) if not ('<H').
the majority of zones either have no polygons at all (sea) or just one zone.
this zone then can be instantly returned without actually testing polygons.
also stored extra binary if only one zone (to directly return that zone without checking)
statistics: (data version 2018g)
maximal amount of coordinates in one polygon: 139130
amount_of_holes: 219
amount of polygons: 1177
shortcut statistics:
highest entry amount is 46
frequencies of entry amounts (from 0 to max entries):
[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
relative accumulated frequencies [%]:
[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0]
58.92 % of all shortcuts are empty
highest amount of different zones in one shortcut is 7
frequencies of entry amounts (from 0 to max):
[76359, 45555, 6963, 672, 43, 6, 1, 1]
relative accumulated frequencies [%]:
[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]
[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]
--------------------------------
The number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)
The number of polygons is: 1177
The number of floats in all the polygons is (2 per point): 10887056
writing file " poly_nr2zone_id.bin "
Done
writing file " poly_zone_ids.bin "
writing file " poly_max_values.bin "
writing file " poly_data.bin "
writing file " poly_adr2data.bin "
writing file " poly_coord_amount.bin "
writing file " shortcuts_entry_amount.bin "
writing file " shortcuts_adr2data.bin "
writing file " shortcuts_data.bin "
writing file " shortcuts_unique_id.bin "
writing file " hole_poly_ids.bin "
writing file " hole_coord_amount.bin "
writing file " hole_adr2data.bin "
writing file " hole_data.bin "
the polygon data makes up 97.11 % of the data
the shortcuts make up 2.01 % of the data
holes make up 0.88 % of the data
"""
# total number of polygons (set after parsing; -1 means "not parsed yet")
nr_of_lines = -1
# name of every timezone, in the order they appear in the input file
all_tz_names = []
# zone id of every polygon (one entry per polygon)
poly_zone_ids = []
# bounding box (xmax, xmin, ymax, ymin) of every polygon
all_boundaries = []
# (x_coords, y_coords) tuple of every polygon
all_coords = []
# number of coordinates of every polygon
all_lengths = []
# counters/registries for the holes found inside the polygons
amount_of_holes = 0
# for every hole: the polygon number it belongs to
polynrs_of_holes = []
# (x_coords, y_coords) tuple of every hole
all_holes = []
# number of coordinates of every hole
all_hole_lengths = []
list_of_pointers = []
# polygon number at which each zone starts (one entry per zone + one sentinel)
poly_nr2zone_id = []
# maps (x, y) shortcut cell -> list of polygon numbers intersecting that cell
shortcuts = {}
def x_shortcut(lng):
    """Map a longitude to its shortcut column index.

    -180 deg maps to column 0; higher longitudes map to higher columns.
    Input is assumed to lie in [-180, 180).
    """
    shifted = lng + 180
    return floor(shifted * NR_SHORTCUTS_PER_LNG)
def y_shortcut(lat):
    """Map a latitude to its shortcut row index.

    +90 deg (north pole) maps to row 0; lower latitudes map to higher rows.
    Input is assumed to lie in [-90, 90).
    """
    shifted = 90 - lat
    return floor(shifted * NR_SHORTCUTS_PER_LAT)
def big_zone(xmax, xmin, ymax, ymin):
    """Return True if a polygon with these boundaries could span more than 4 shortcuts."""
    wide_enough = (xmax - xmin) > 2 / NR_SHORTCUTS_PER_LNG
    tall_enough = (ymax - ymin) > 2 / NR_SHORTCUTS_PER_LAT
    return wide_enough and tall_enough
def percent(numerator, denominator):
    """Return numerator/denominator expressed as a percentage, rounded to 2 decimals."""
    ratio = numerator / denominator
    return round(ratio * 100, 2)
def accumulated_frequency(int_list):
    """Return the running totals of int_list as percentages of its sum (2 decimals)."""
    total = sum(int_list)
    running = 0
    percentages = []
    for value in int_list:
        running += value
        # inlined percent(): round((running / total) * 100, 2)
        percentages.append(round((running / total) * 100, 2))
    return percentages
def ints_of(line=0):
    """Return the coordinates of polygon nr. `line` converted to the int32 representation."""
    x_coords, y_coords = all_coords[line]
    int_x = [coord2int(coord) for coord in x_coords]
    int_y = [coord2int(coord) for coord in y_coords]
    return int_x, int_y
def contained(x, y, x_coords, y_coords):
    """Return whether the point (x, y) lies inside the polygon given by its coordinate lists."""
    polygon = [x_coords, y_coords]
    return inside_polygon(x, y, polygon)
def unique(iterable):
    """Return a list of the distinct elements of `iterable`, keeping first-seen order.

    Works for unhashable elements as well (membership test on a list).
    """
    collected = []
    for element in iterable:
        if element in collected:
            continue
        collected.append(element)
    return collected
def point_between(p1, p2):
    """Return the midpoint of the segment from p1 to p2 (2-tuples of numbers)."""
    mid_x = p1[0] + (p2[0] - p1[0]) / 2
    mid_y = p1[1] + (p2[1] - p1[1]) / 2
    return mid_x, mid_y
def get_shortcuts(x, y):
    """Return the list of polygon numbers stored for shortcut cell (x, y), or [] if empty."""
    entries = shortcuts.get((x, y))
    return [] if entries is None else entries
def _polygons(id_list):
    """Yield the (x_coords, y_coords) tuples of the polygons with the given numbers."""
    yield from (all_coords[poly_id] for poly_id in id_list)
def not_empty(iterable):
    """Return True iff the iterable yields at least one element.

    Note: consumes at most one item of an iterator.
    """
    for _ in iterable:
        return True
    return False
def polys_of_one_zone():
    """Yield, for every timezone, the list of polygon numbers belonging to that zone."""
    for zone_nr in range(len(timezone_names)):
        first_poly = poly_nr2zone_id[zone_nr]
        after_last_poly = poly_nr2zone_id[zone_nr + 1]
        yield list(range(first_poly, after_last_poly))
def replace_entry(iterable, entry, substitute):
    """Replace every occurrence of `entry` in the sequence (in place) and return it."""
    for idx, value in enumerate(iterable):
        if value == entry:
            iterable[idx] = substitute
    return iterable
def _holes_in_poly(poly_nr):
    """Yield the coordinates of every hole registered for polygon nr. `poly_nr`."""
    for hole_nr, polygon in enumerate(polynrs_of_holes):
        if polygon == poly_nr:
            yield all_holes[hole_nr]
def parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):
    """Parse all timezone polygons (and their holes) from the geojson file at `path`.

    Fills the module level data stores (all_tz_names, all_coords, all_lengths,
    all_boundaries, poly_zone_ids, all_holes, ...) and validates that every
    amount fits into the fixed-size integer types of the binary output files.

    :raises ValueError: if any amount/id overflows its binary representation
        or a polygon without coordinates was parsed.
    """
    global amount_of_holes
    global nr_of_lines
    global poly_zone_ids

    print('Parsing data from {}\nthis could take a while...\n'.format(path))
    # use a context manager so the file handle is closed (the old code leaked it)
    with open(path) as json_file:
        tz_list = json.loads(json_file.read()).get('features')
    # this counter just counts polygons, not holes!
    polygon_counter = 0
    current_zone_id = 0
    print('holes found at: (poly_nr zone_name)')
    for tz_dict in tz_list:
        if DEBUG and polygon_counter > DEBUG_POLY_STOP:
            break
        tz_name = tz_dict.get('properties').get("tzid")
        all_tz_names.append(tz_name)
        geometry = tz_dict.get("geometry")
        if geometry.get('type') == 'MultiPolygon':
            # depth is 4
            multipolygon = geometry.get("coordinates")
        else:
            # depth is 3 (only one polygon, possibly with holes!)
            multipolygon = [geometry.get("coordinates")]
        for poly_with_hole in multipolygon:
            # the first entry is the outline polygon
            x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))
            # IMPORTANT: do not use the last value (is equal to the first)!
            x_coords = list(x_coords)
            y_coords = list(y_coords)
            x_coords.pop(-1)
            y_coords.pop(-1)
            all_coords.append((x_coords, y_coords))
            all_lengths.append(len(x_coords))
            all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))
            poly_zone_ids.append(current_zone_id)

            # everything else is interpreted as a hole!
            for hole in poly_with_hole:
                print(polygon_counter, tz_name)
                # keep track of how many holes there are
                amount_of_holes += 1
                polynrs_of_holes.append(polygon_counter)
                x_coords, y_coords = list(zip(*hole))
                # IMPORTANT: do not use the last value (is equal to the first)!
                x_coords = list(x_coords)
                y_coords = list(y_coords)
                x_coords.pop(-1)
                y_coords.pop(-1)
                all_holes.append((x_coords, y_coords))
                all_hole_lengths.append(len(x_coords))

            polygon_counter += 1

        current_zone_id += 1

    # sanity checks: every amount/id must fit into the integer type it is packed as
    if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):
        raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',
                         max(all_lengths))

    if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):
        raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',
                         max(all_hole_lengths))

    nr_of_lines = len(all_lengths)
    if polygon_counter != nr_of_lines:
        raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)

    if nr_of_lines >= 2 ** (8 * NR_BYTES_H):
        raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',
                         nr_of_lines, 'polygons')

    # BUGFIX: was `>`, which let the (unrepresentable) id 2**16 slip through;
    # values packed with '<H' must be strictly below 2**16.
    if poly_zone_ids[-1] >= 2 ** (8 * NR_BYTES_H):
        raise ValueError('zone id cannot be encoded as unsigned short (int16). the last id is',
                         poly_zone_ids[-1])

    if 0 in all_lengths:
        raise ValueError()

    print('... parsing done.')
    print('maximal amount of coordinates in one polygon:', max(all_lengths))
    print('amount_of_holes:', amount_of_holes)
    print('amount of polygons:', nr_of_lines)
    print('\n')
def update_zone_names(path=TIMEZONE_NAMES_FILE):
    """Write the collected timezone names to `path` (as a JSON list) and fill
    poly_nr2zone_id with the polygon number at which each zone starts
    (plus a final sentinel entry one past the last polygon).

    :raises ValueError: if the zone ids of the polygons are not ascending.
    """
    global poly_zone_ids
    global list_of_pointers
    global all_boundaries
    global all_coords
    global all_lengths
    global polynrs_of_holes
    print('updating the zone names in {} now...'.format(path))
    # store the zone names as a json list
    with open(abspath(path), 'w') as f:
        f.write(json.dumps(all_tz_names))
    print('...Done.\n\nComputing where zones start and end...')
    last_id = -1
    poly_nr = 0
    for zone_id in poly_zone_ids:
        if zone_id != last_id:
            # a new zone starts at this polygon
            poly_nr2zone_id.append(poly_nr)
            if zone_id < last_id:
                # polygons must be sorted by (ascending) zone id
                raise ValueError()
            last_id = zone_id
        poly_nr += 1
    # sentinel: one past the last polygon
    poly_nr2zone_id.append(poly_nr)
    print('...Done.\n')
def compile_binaries():
global nr_of_lines
global shortcuts
    def print_shortcut_statistics():
        """Print frequency statistics about the computed shortcuts.

        First: how many shortcuts contain 0, 1, 2, ... polygons.
        Then: how many shortcuts contain 0, 1, 2, ... *distinct zones*.
        Reads nr_of_entries_in_shortcut, shortcut_entries and
        amount_of_shortcuts from the enclosing scope.
        """
        frequencies = []
        max_val = max(*nr_of_entries_in_shortcut)
        print('shortcut statistics:')
        print('highest entry amount is', max_val)
        # count how many shortcuts have exactly max_val, max_val-1, ..., 0 entries
        while max_val >= 0:
            frequencies.append(nr_of_entries_in_shortcut.count(max_val))
            max_val -= 1
        frequencies.reverse()
        print('frequencies of entry amounts (from 0 to max entries):')
        print(frequencies)
        empty_shortcuts = frequencies[0]
        print('relative accumulated frequencies [%]:')
        acc = accumulated_frequency(frequencies)
        print(acc)
        print([round(100 - x, 2) for x in acc])
        print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\n')
        # same statistics, but per amount of *distinct zones* in each shortcut
        amount_of_different_zones = []
        for entry in shortcut_entries:
            registered_zone_ids = []
            for polygon_nr in entry:
                id = poly_zone_ids[polygon_nr]  # NOTE: shadows the builtin id()
                if id not in registered_zone_ids:
                    registered_zone_ids.append(id)
            amount_of_different_zones.append(len(registered_zone_ids))
        frequencies = []
        max_val = max(*amount_of_different_zones)
        print('highest amount of different zones in one shortcut is', max_val)
        while max_val >= 1:
            frequencies.append(amount_of_different_zones.count(max_val))
            max_val -= 1
        # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)
        frequencies.append(empty_shortcuts)
        frequencies.reverse()
        print('frequencies of entry amounts (from 0 to max):')
        print(frequencies)
        print('relative accumulated frequencies [%]:')
        acc = accumulated_frequency(frequencies)
        print(acc)
        print([round(100 - x, 2) for x in acc])
        print('--------------------------------\n')
def included_shortcut_row_nrs(max_lat, min_lat):
return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))
def included_shortcut_column_nrs(max_lng, min_lng):
return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))
def longitudes_to_check(max_lng, min_lng):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LNG
current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def latitudes_to_check(max_lat, min_lat):
output_list = []
step = 1 / NR_SHORTCUTS_PER_LAT
current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT
while current < end:
output_list.append(current)
current += step
output_list.append(end)
return output_list
def compute_x_intersection(y, x1, x2, y1, y2):
"""returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2
"""
delta_y = y2 - y1
if delta_y == 0:
return x1
return ((y - y1) * (x2 - x1) / delta_y) + x1
def compute_y_intersection(x, x1, x2, y1, y2):
"""returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2
"""
delta_x = x2 - x1
if delta_x == 0:
return x1
return ((x - x1) * (y2 - y1) / delta_x) + y1
def x_intersections(y, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if y_coords[i] <= y:
# print('Y1<=y')
if y_coords[iplus1] > y:
# this was a crossing. compute the intersect
# print('Y2>y')
intersects.append(
compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
# print('Y1>y')
if y_coords[iplus1] <= y:
# this was a crossing. compute the intersect
# print('Y2<=y')
intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
def y_intersections(x, x_coords, y_coords):
intersects = []
for i in range(len(y_coords) - 1):
iplus1 = i + 1
if x_coords[i] <= x:
if x_coords[iplus1] > x:
# this was a crossing. compute the intersect
intersects.append(
compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))
else:
if x_coords[iplus1] <= x:
# this was a crossing. compute the intersect
intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],
y_coords[iplus1]))
return intersects
    def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):
        """Return the set of (x, y) shortcut cells that polygon nr. ``line``
        actually intersects (instead of all cells of its bounding box).

        Works by intersecting the polygon outline with every horizontal and
        vertical shortcut grid line inside the bounding box and registering
        the cells adjacent to each intersection interval.

        :raises ValueError: if an uneven number of intersections is found
            (would mean the outline is not a closed polygon).
        """
        shortcuts_for_line = set()
        # x_longs = binary_reader.x_coords_of(line)
        x_longs, y_longs = ints_of(line)
        # y_longs = binary_reader.y_coords_of(line)
        # close the ring: repeat the first point at the end
        y_longs.append(y_longs[0])
        x_longs.append(x_longs[0])
        step = 1 / NR_SHORTCUTS_PER_LAT
        # print('checking the latitudes')
        for lat in latitudes_to_check(ymax, ymin):
            # print(lat)
            # print(coordinate_to_longlong(lat))
            # print(y_longs)
            # print(x_intersections(coordinate_to_longlong(lat), x_longs, y_longs))
            # raise ValueError
            intersects = sorted([int2coord(x) for x in
                                 x_intersections(coord2int(lat), x_longs, y_longs)])
            # print(intersects)
            nr_of_intersects = len(intersects)
            if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been accounted')
            for i in range(0, nr_of_intersects, 2):
                possible_longitudes = []
                # collect all the zones between two intersections [in,out,in,out,...]
                iplus = i + 1
                intersection_in = intersects[i]
                intersection_out = intersects[iplus]
                if intersection_in == intersection_out:
                    # the polygon has a point exactly on the border of a shortcut zone here!
                    # only select the top shortcut if it is actually inside the polygon (point a little up is inside)
                    if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,
                                 y_longs):
                        shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))
                    # the bottom shortcut is always selected
                    shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))
                else:
                    # add all the shortcuts for the whole found area of intersection
                    possible_y_shortcut = y_shortcut(lat)
                    # both shortcuts should only be selected when the polygon doesnt stays on the border
                    middle = intersection_in + (intersection_out - intersection_in) / 2
                    if contained(coord2int(middle), coord2int(lat) + 1, x_longs,
                                 y_longs):
                        while intersection_in < intersection_out:
                            possible_longitudes.append(intersection_in)
                            intersection_in += step
                        possible_longitudes.append(intersection_out)
                        # the shortcut above and below of the intersection should be selected!
                        possible_y_shortcut_min1 = possible_y_shortcut - 1
                        for possible_x_coord in possible_longitudes:
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))
                    else:
                        # polygon does not cross the border!
                        while intersection_in < intersection_out:
                            possible_longitudes.append(intersection_in)
                            intersection_in += step
                        possible_longitudes.append(intersection_out)
                        # only the shortcut above of the intersection should be selected!
                        for possible_x_coord in possible_longitudes:
                            shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))
        # print('now all the longitudes to check')
        # same procedure horizontally
        # NOTE(review): step is recomputed with NR_SHORTCUTS_PER_LAT although this
        # loop walks longitudes; harmless while both constants are equal — confirm
        # against global_settings before changing either constant.
        step = 1 / NR_SHORTCUTS_PER_LAT
        for lng in longitudes_to_check(xmax, xmin):
            # print(lng)
            # print(coordinate_to_longlong(lng))
            # print(x_longs)
            # print(x_intersections(coordinate_to_longlong(lng), x_longs, y_longs))
            intersects = sorted([int2coord(y) for y in
                                 y_intersections(coord2int(lng), x_longs, y_longs)])
            # print(intersects)
            nr_of_intersects = len(intersects)
            if nr_of_intersects % 2 != 0:
                raise ValueError('an uneven number of intersections has been accounted')
            possible_latitudes = []
            for i in range(0, nr_of_intersects, 2):
                # collect all the zones between two intersections [in,out,in,out,...]
                iplus = i + 1
                intersection_in = intersects[i]
                intersection_out = intersects[iplus]
                if intersection_in == intersection_out:
                    # the polygon has a point exactly on the border of a shortcut here!
                    # only select the left shortcut if it is actually inside the polygon (point a little left is inside)
                    if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,
                                 y_longs):
                        shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))
                    # the right shortcut is always selected
                    shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))
                else:
                    # add all the shortcuts for the whole found area of intersection
                    possible_x_shortcut = x_shortcut(lng)
                    # both shortcuts should only be selected when the polygon doesnt stays on the border
                    middle = intersection_in + (intersection_out - intersection_in) / 2
                    if contained(coord2int(lng) - 1, coord2int(middle), x_longs,
                                 y_longs):
                        while intersection_in < intersection_out:
                            possible_latitudes.append(intersection_in)
                            intersection_in += step
                        possible_latitudes.append(intersection_out)
                        # both shortcuts right and left of the intersection should be selected!
                        possible_x_shortcut_min1 = possible_x_shortcut - 1
                        for possible_latitude in possible_latitudes:
                            shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
                            shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))
                    else:
                        while intersection_in < intersection_out:
                            possible_latitudes.append(intersection_in)
                            intersection_in += step
                        # only the shortcut right of the intersection should be selected!
                        possible_latitudes.append(intersection_out)
                        for possible_latitude in possible_latitudes:
                            shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))
        return shortcuts_for_line
def construct_shortcuts():
print('building shortucts...')
print('currently at polygon nr:')
line = 0
for xmax, xmin, ymax, ymin in all_boundaries:
# xmax, xmin, ymax, ymin = boundaries_of(line=line)
if line % 100 == 0:
print(line)
# print([xmax, xmin, ymax, ymin])
column_nrs = included_shortcut_column_nrs(xmax, xmin)
row_nrs = included_shortcut_row_nrs(ymax, ymin)
if big_zone(xmax, xmin, ymax, ymin):
# print('line ' + str(line))
# print('This is a big zone! computing exact shortcuts')
# print('Nr of entries before')
# print(len(column_nrs) * len(row_nrs))
# print('columns and rows before optimisation:')
# print(column_nrs)
# print(row_nrs)
# print(ints_of(line))
# This is a big zone! compute exact shortcuts with the whole polygon points
shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)
# n += len(shortcuts_for_line)
min_x_shortcut = column_nrs[0]
max_x_shortcut = column_nrs[-1]
min_y_shortcut = row_nrs[0]
max_y_shortcut = row_nrs[-1]
shortcuts_to_remove = []
# remove shortcuts from outside the possible/valid area
for x, y in shortcuts_for_line:
if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:
shortcuts_to_remove.append((x, y))
for s in shortcuts_to_remove:
shortcuts_for_line.remove(s)
# print('and after:')
# print(len(shortcuts_for_line))
# print(shortcuts_for_line)
# column_nrs_after = set()
# row_nrs_after = set()
# for x, y in shortcuts_for_line:
# column_nrs_after.add(x)
# row_nrs_after.add(y)
# print(column_nrs_after)
# print(row_nrs_after)
# print(shortcuts_for_line)
if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):
raise ValueError(
'there are more shortcuts than before now. there is something wrong with the algorithm!')
if len(shortcuts_for_line) < 3:
raise ValueError('algorithm not valid! less than 3 zones detected (should be at least 3)')
else:
shortcuts_for_line = []
for column_nr in column_nrs:
for row_nr in row_nrs:
shortcuts_for_line.append((column_nr, row_nr))
# print(shortcuts_for_line)
for shortcut in shortcuts_for_line:
shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]
line += 1
# print('collected entries:')
# print(n)
start_time = datetime.now()
construct_shortcuts()
end_time = datetime.now()
print('calculating the shortcuts took:', end_time - start_time, '\n')
# there are two floats per coordinate (lng, lat)
nr_of_floats = 2 * sum(all_lengths)
# write number of entries in shortcut field (x,y)
nr_of_entries_in_shortcut = []
shortcut_entries = []
amount_filled_shortcuts = 0
def sort_poly_shortcut(poly_nrs):
# TODO write test
# the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id
# this is critical for ruling out zones faster
# (as soon as just polygons of one zone are left this zone can be returned)
# only around 5% of all shortcuts include polygons from more than one zone
# in most of those cases there are only two types of zones (= entries in counted_zones) and one of them
# has only one entry (important to check the zone with one entry first!).
polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]
id_freq = [polygon_ids.count(id) for id in polygon_ids]
zipped = list(zip(poly_nrs, polygon_ids, id_freq))
# also make sure polygons with the same zone freq. are ordered after their zone id
# (polygons from different zones should not get mixed up)
sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])
return [x[0] for x in sort] # take only the polygon nrs
    # count how many shortcut addresses will be written:
    # flatten out the shortcuts in one list in the order they are going to be written inside the polygon file
    for x in range(360 * NR_SHORTCUTS_PER_LNG):
        for y in range(180 * NR_SHORTCUTS_PER_LAT):
            try:
                shortcuts_this_entry = shortcuts[(x, y)]
                shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))
                amount_filled_shortcuts += 1
                nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))
                # print((x,y,this_lines_shortcuts))
            except KeyError:
                # empty shortcut cell (e.g. open sea)
                nr_of_entries_in_shortcut.append(0)

    amount_of_shortcuts = len(nr_of_entries_in_shortcut)
    print_shortcut_statistics()

    if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:
        print(amount_of_shortcuts)
        raise ValueError('this number of shortcut zones is wrong')

    print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',
          round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')

    # for every shortcut <H and <I is written (nr of entries and address)
    shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)
    for nr in nr_of_entries_in_shortcut:
        # every line in every shortcut takes up 2bytes
        shortcut_space += NR_BYTES_H * nr

    print('The number of polygons is:', nr_of_lines)
    print('The number of floats in all the polygons is (2 per point):', nr_of_floats)

    # [POLYGON AREA] write the per-polygon binaries
    path = 'poly_nr2zone_id.bin'
    print('writing file', path)
    output_file = open(path, 'wb')
    for zone_id in poly_nr2zone_id:
        output_file.write(pack(b'<H', zone_id))
    output_file.close()
    print('Done\n')
    # write zone_ids
    path = 'poly_zone_ids.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for zone_id in poly_zone_ids:
        output_file.write(pack(b'<H', zone_id))
    output_file.close()

    # write boundary_data
    path = 'poly_max_values.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for xmax, xmin, ymax, ymin in all_boundaries:
        output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))
    output_file.close()

    # write polygon_data, addresses and number of values
    path = 'poly_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    addresses = []
    i = 0
    for x_coords, y_coords in all_coords:
        # remember the byte offset where this polygon's data starts
        addresses.append(output_file.tell())
        if all_lengths[i] != len(x_coords):
            raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))
        # for every polygon first all x values, then all y values!
        for x in x_coords:
            output_file.write(pack(b'<i', coord2int(x)))
        for y in y_coords:
            output_file.write(pack(b'<i', coord2int(y)))
        i += 1
    output_file.close()

    path = 'poly_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for adr in addresses:
        output_file.write(pack(b'<I', adr))
    output_file.close()

    path = 'poly_coord_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_lengths:
        output_file.write(pack(b'<I', length))
    output_file.close()

    # [SHORTCUT AREA]
    # write all nr of entries
    path = 'shortcuts_entry_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for nr in nr_of_entries_in_shortcut:
        if nr > 300:
            raise ValueError("There are too many polygons in this shortcut:", nr)
        output_file.write(pack(b'<H', nr))
    output_file.close()

    # write Address of first Polygon_nr in shortcut field (x,y)
    # Attention: 0 is written when no entries are in this shortcut
    adr = 0
    path = 'shortcuts_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for nr in nr_of_entries_in_shortcut:
        if nr == 0:
            output_file.write(pack(b'<I', 0))
        else:
            output_file.write(pack(b'<I', adr))
            # each line_nr takes up 2 bytes of space
            adr += 2 * nr
    output_file.close()

    # write Line_Nrs for every shortcut
    path = 'shortcuts_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for entries in shortcut_entries:
        for entry in entries:
            if entry > nr_of_lines:
                raise ValueError(entry)
            output_file.write(pack(b'<H', entry))
    output_file.close()

    # write corresponding zone id for every shortcut (iff unique)
    path = 'shortcuts_unique_id.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    if poly_zone_ids[-1] >= INVALID_ZONE_ID:
        raise ValueError(
            'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')
    for x in range(360 * NR_SHORTCUTS_PER_LNG):
        for y in range(180 * NR_SHORTCUTS_PER_LAT):
            try:
                shortcuts_this_entry = shortcuts[(x, y)]
                unique_id = poly_zone_ids[shortcuts_this_entry[0]]
                for nr in shortcuts_this_entry:
                    if poly_zone_ids[nr] != unique_id:
                        # there is a polygon from a different zone (hence an invalid id should be written)
                        unique_id = INVALID_ZONE_ID
                        break
                output_file.write(pack(b'<H', unique_id))
            except KeyError:
                # also write an Invalid Id when there is no polygon at all
                output_file.write(pack(b'<H', INVALID_ZONE_ID))
    output_file.close()

    # [HOLE AREA, Y = number of holes (very few: around 22)]
    hole_space = 0

    # '<H' for every hole store the related line
    path = 'hole_poly_ids.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    i = 0
    for line in polynrs_of_holes:
        if line > nr_of_lines:
            raise ValueError(line, nr_of_lines)
        output_file.write(pack(b'<H', line))
        i += 1
    hole_space += output_file.tell()
    output_file.close()
    if i > amount_of_holes:
        raise ValueError('There are more related lines than holes.')

    # '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! x,y in int32 int32) in this hole]
    path = 'hole_coord_amount.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_hole_lengths:
        output_file.write(pack(b'<H', length))
    hole_space += output_file.tell()
    output_file.close()

    # '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]
    adr = 0
    path = 'hole_adr2data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for length in all_hole_lengths:
        output_file.write(pack(b'<I', adr))
        # each pair of points takes up 8 bytes of space
        adr += 2 * NR_BYTES_I * length
    hole_space += output_file.tell()
    output_file.close()

    # Y times [ 2x i signed ints for every hole: x coords, y coords ]
    # write hole polygon_data
    path = 'hole_data.bin'
    print('writing file "', path, '"')
    output_file = open(path, 'wb')
    for x_coords, y_coords in all_holes:
        for x in x_coords:
            output_file.write(pack(b'<i', coord2int(x)))
        for y in y_coords:
            output_file.write(pack(b'<i', coord2int(y)))
    hole_space += output_file.tell()
    output_file.close()

    # final size report of the three data areas
    polygon_space = nr_of_floats * NR_BYTES_I
    total_space = polygon_space + hole_space + shortcut_space

    print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')
    print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')
    print('holes make up', percent(hole_space, total_space), '% of the data')
    print('Success!')
    return
if __name__ == '__main__':
    # parsing the data from the .json into RAM
    parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)
    # update all the zone names and set the right ids to be written in the poly_zone_ids.bin
    # sort data according to zone_id
    update_zone_names(path=TIMEZONE_NAMES_FILE)
    # IMPORTANT: reload the freshly written timezone names (JSON list)!
    # the compilation process needs the new version of the timezone names
    with open(abspath(join(__file__, pardir, TIMEZONE_NAMES_FILE)), 'r') as f:
        timezone_names = json.loads(f.read())
    # compute shortcuts and write everything into the binaries
    compile_binaries()
|
normal
|
{
"blob_id": "52e43f795c864340734de2640e3c1a70b05e8ea0",
"index": 7248,
"step-1": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\n<mask token>\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\n<mask token>\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\n<mask token>\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n 
all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\n<mask token>\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\n<mask token>\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\n<mask token>\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in 
multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\n<mask token>\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = 
tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n 
frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if 
y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n 
intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, 
y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef x_shortcut(lng):\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n return (xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 /\n NR_SHORTCUTS_PER_LAT)\n\n\ndef percent(numerator, denominator):\n return round(numerator / denominator * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\n<mask token>\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n print('Parsing data from {}\\nthis could take a while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n polygon_counter = 0\n current_zone_id = 0\n 
print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n tz_name = tz_dict.get('properties').get('tzid')\n all_tz_names.append(tz_name)\n geometry = tz_dict.get('geometry')\n if geometry.get('type') == 'MultiPolygon':\n multipolygon = geometry.get('coordinates')\n else:\n multipolygon = [geometry.get('coordinates')]\n for poly_with_hole in multipolygon:\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(\n y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n polygon_counter += 1\n current_zone_id += 1\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n raise ValueError(\n 'amount of coords cannot be represented by int32 in poly_coord_amount.bin:'\n , max(all_lengths))\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:'\n , max(all_hole_lengths))\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError(\n 'polygon counter and entry number in all_length is different:',\n polygon_counter, nr_of_lines)\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! 
there are'\n , nr_of_lines, 'polygons')\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n raise ValueError(\n 'zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n if 0 in all_lengths:\n raise ValueError()\n print('... parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts),\n '% of all shortcuts are empty\\n')\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n 
registered_zone_ids.append(id)\n amount_of_different_zones.append(len(registered_zone_ids))\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return (y - y1) * (x2 - x1) / delta_y + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the 
line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return (x - x1) * (y2 - y1) / delta_x + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n if y_coords[iplus1] > y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif y_coords[iplus1] <= y:\n intersects.append(compute_x_intersection(y, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n elif x_coords[iplus1] <= x:\n intersects.append(compute_y_intersection(x, x_coords[i],\n x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n x_longs, y_longs = ints_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lat in latitudes_to_check(ymax, ymin):\n intersects = sorted([int2coord(x) for x in x_intersections(\n coord2int(lat), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(intersection_in), coord2int(lat) +\n 1, x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat) - 1))\n shortcuts_for_line.add((x_shortcut(intersection_in),\n y_shortcut(lat)))\n else:\n possible_y_shortcut = 
y_shortcut(lat)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(middle), coord2int(lat) + 1,\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut_min1))\n else:\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n possible_longitudes.append(intersection_out)\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(\n possible_x_coord), possible_y_shortcut))\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n intersects = sorted([int2coord(y) for y in y_intersections(\n coord2int(lng), x_longs, y_longs)])\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError(\n 'an uneven number of intersections has been accounted')\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n if contained(coord2int(lng) - 1, coord2int(\n intersection_in), x_longs, y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1,\n y_shortcut(intersection_in)))\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(\n intersection_in)))\n else:\n possible_x_shortcut = x_shortcut(lng)\n middle = intersection_in + (intersection_out -\n intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle),\n x_longs, y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n 
possible_latitudes.append(intersection_out)\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n shortcuts_for_line.add((\n possible_x_shortcut_min1, y_shortcut(\n possible_latitude)))\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n possible_latitudes.append(intersection_out)\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut,\n y_shortcut(possible_latitude)))\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n if line % 100 == 0:\n print(line)\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n if big_zone(xmax, xmin, ymax, ymin):\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin,\n ymax, ymin, line)\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n for x, y in shortcuts_for_line:\n if (x < min_x_shortcut or x > max_x_shortcut or y <\n min_y_shortcut or y > max_y_shortcut):\n shortcuts_to_remove.append((x, y))\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!'\n )\n if len(shortcuts_for_line) < 3:\n raise ValueError(\n 'algorithm not valid! 
less than 3 zones detected (should be at least 3)'\n )\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n line += 1\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n nr_of_floats = 2 * sum(all_lengths)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n sort = sorted(sorted(zipped, key=lambda x: x[1]), key=lambda x: x[2])\n return [x[0] for x in sort]\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n shortcut_entries.append(sort_poly_shortcut(\n shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n if (amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG *\n NR_SHORTCUTS_PER_LAT):\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n print('The number of filled shortcut zones are:',\n amount_filled_shortcuts, '(=', round(amount_filled_shortcuts /\n amount_of_shortcuts * 100, 2), '% of all shortcuts)')\n shortcut_space = (360 * NR_SHORTCUTS_PER_LNG * 180 *\n NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I))\n for nr in nr_of_entries_in_shortcut:\n shortcut_space += NR_BYTES_H * nr\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per 
point):',\n nr_of_floats)\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n print('Done\\n')\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin),\n coord2int(ymax), coord2int(ymin)))\n output_file.close()\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!',\n all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError('There are too many polygons in this shortcut:',\n nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = 
open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n adr += 2 * nr\n output_file.close()\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!'\n )\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[x, y]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n output_file.close()\n hole_space = 0\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n 
output_file.write(pack(b'<I', adr))\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n print('the polygon data makes up', percent(polygon_space, total_space),\n '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space),\n '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\n<mask token>\n",
"step-5": "# -*- coding:utf-8 -*-\nimport json\nfrom datetime import datetime\nfrom math import ceil, floor\nfrom os.path import abspath, join, pardir\nfrom struct import pack\n\nfrom .global_settings import (\n DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n)\n# # # keep in mind: the faster numba optimized helper fct. cannot be used here,\n# # # because numpy classes are not being used at this stage yet!\nfrom .helpers import coord2int, inside_polygon, int2coord\n\n# from helpers import coord2int, inside_polygon, int2coord\n# from global_settings import (\n# DEBUG, DEBUG_POLY_STOP, INPUT_JSON_FILE_NAME, INVALID_ZONE_ID, NR_BYTES_H, NR_BYTES_I, NR_SHORTCUTS_PER_LAT,\n# NR_SHORTCUTS_PER_LNG, TIMEZONE_NAMES_FILE,\n# )\n\n\n# import sys\n# from os.path import dirname\n#\n# sys.path.insert(0, dirname(__file__))\n# from helpers import coord2int, int2coord, inside_polygon\n\n\n\"\"\"\nTODO write tests\n\nUSE INSTRUCTIONS:\n\n- download the latest timezones.geojson.zip file from github.com/evansiroky/timezone-boundary-builder/releases\n- unzip and place the combined.json inside this timezonefinder folder\n- run this file_converter.py as a script until the compilation of the binary files is completed.\n\n\nIMPORTANT: all coordinates (floats) are being converted to int32 (multiplied by 10^7). This makes computations faster\nand it takes lot less space, without loosing too much accuracy (min accuracy (=at the equator) is still 1cm !)\n\nB = unsigned char (1byte = 8bit Integer)\nH = unsigned short (2 byte integer)\nI = unsigned 4byte integer\ni = signed 4byte integer\n\n\nBinaries being written:\n\n[POLYGONS:] there are approx. 
1k Polygons (evansiroky/timezone-boundary-builder 2017a)\npoly_zone_ids: the related zone_id for every polygon ('<H')\npoly_coord_amount: the amount of coordinates in every polygon ('<I')\npoly_adr2data: address in poly_data.bin where data for every polygon starts ('<I')\npoly_max_values: boundaries for every polygon ('<iiii': xmax, xmin, ymax, ymin)\npoly_data: coordinates for every polygon (multiple times '<i') (for every polygon first all x then all y values!)\npoly_nr2zone_id: the polygon number of the first polygon from every zone('<H')\n\n[HOLES:] number of holes (162 evansiroky/timezone-boundary-builder 2018d)\nhole_poly_ids: the related polygon_nr (=id) for every hole ('<H')\nhole_coord_amount: the amount of coordinates in every hole ('<H')\nhole_adr2data: address in hole_data.bin where data for every hole starts ('<I')\nhole_data: coordinates for every hole (multiple times '<i')\n\n[SHORTCUTS:] the surface of the world is split up into a grid of shortcut rectangles.\n-> there are a total of 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT shortcuts\nshortcut here means storing for every cell in a grid of the world map which polygons are located in that cell\nthey can therefore be used to drastically reduce the amount of polygons which need to be checked in order to\ndecide which timezone a point is located in.\n\nthe list of polygon ids in each shortcut is sorted after freq. 
of appearance of their zone id\nthis is critical for ruling out zones faster (as soon as just polygons of one zone are left this zone can be returned)\n\nshortcuts_entry_amount: the amount of polygons for every shortcut ('<H')\nshortcuts_adr2data: address in shortcut_data.bin where data for every shortcut starts ('<I')\nshortcuts_data: polygon numbers (ids) for every shortcut (multiple times '<H')\nshortcuts_unique_id: the zone id if only polygons from one zone are present,\n a high number (with no corresponding zone) if not ('<H').\n the majority of zones either have no polygons at all (sea) or just one zone.\n this zone then can be instantly returned without actually testing polygons.\n\nalso stored extra binary if only one zone (to directly return that zone without checking)\n\n\n\nstatistics: (data version 2018g)\n\n\nmaximal amount of coordinates in one polygon: 139130\namount_of_holes: 219\namount of polygons: 1177\n\nshortcut statistics:\nhighest entry amount is 46\nfrequencies of entry amounts (from 0 to max entries):\n[76359, 45216, 7204, 710, 81, 17, 4, 1, 3, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\nrelative accumulated frequencies [%]:\n[58.92, 93.81, 99.37, 99.91, 99.98, 99.99, 99.99, 99.99, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,\n 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]\n[41.08, 6.19, 0.63, 0.09, 0.02, 0.01, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0]\n58.92 % of all shortcuts are empty\n\nhighest amount of different zones in one shortcut is 7\nfrequencies of entry amounts (from 0 to max):\n[76359, 45555, 6963, 672, 43, 
6, 1, 1]\nrelative accumulated frequencies [%]:\n[58.92, 94.07, 99.44, 99.96, 99.99, 100.0, 100.0, 100.0]\n[41.08, 5.93, 0.56, 0.04, 0.01, 0.0, 0.0, 0.0]\n--------------------------------\n\nThe number of filled shortcut zones are: 53241 (= 41.08 % of all shortcuts)\nThe number of polygons is: 1177\nThe number of floats in all the polygons is (2 per point): 10887056\nwriting file \" poly_nr2zone_id.bin \"\nDone\n\nwriting file \" poly_zone_ids.bin \"\nwriting file \" poly_max_values.bin \"\nwriting file \" poly_data.bin \"\nwriting file \" poly_adr2data.bin \"\nwriting file \" poly_coord_amount.bin \"\nwriting file \" shortcuts_entry_amount.bin \"\nwriting file \" shortcuts_adr2data.bin \"\nwriting file \" shortcuts_data.bin \"\nwriting file \" shortcuts_unique_id.bin \"\nwriting file \" hole_poly_ids.bin \"\nwriting file \" hole_coord_amount.bin \"\nwriting file \" hole_adr2data.bin \"\nwriting file \" hole_data.bin \"\nthe polygon data makes up 97.11 % of the data\nthe shortcuts make up 2.01 % of the data\nholes make up 0.88 % of the data\n\"\"\"\n\nnr_of_lines = -1\nall_tz_names = []\npoly_zone_ids = []\nall_boundaries = []\nall_coords = []\nall_lengths = []\namount_of_holes = 0\npolynrs_of_holes = []\nall_holes = []\nall_hole_lengths = []\nlist_of_pointers = []\npoly_nr2zone_id = []\nshortcuts = {}\n\n\ndef x_shortcut(lng):\n # higher (=lng) means higher x shortcut!!! 0 (-180deg lng) -> 360 (180deg)\n # if lng < -180 or lng >= 180:\n # raise ValueError('longitude out of bounds', lng)\n return floor((lng + 180) * NR_SHORTCUTS_PER_LNG)\n\n\ndef y_shortcut(lat):\n # lower y (=lat) means higher y shortcut!!! 
0 (90deg lat) -> 180 (-90deg)\n # if lat < -90 or lat >= 90:\n # raise ValueError('this latitude is out of bounds', lat)\n return floor((90 - lat) * NR_SHORTCUTS_PER_LAT)\n\n\ndef big_zone(xmax, xmin, ymax, ymin):\n # returns True if a zone with those boundaries could have more than 4 shortcuts\n return xmax - xmin > 2 / NR_SHORTCUTS_PER_LNG and ymax - ymin > 2 / NR_SHORTCUTS_PER_LAT\n\n\ndef percent(numerator, denominator):\n return round((numerator / denominator) * 100, 2)\n\n\ndef accumulated_frequency(int_list):\n out = []\n total = sum(int_list)\n acc = 0\n for e in int_list:\n acc += e\n out.append(percent(acc, total))\n\n return out\n\n\ndef ints_of(line=0):\n x_coords, y_coords = all_coords[line]\n return [coord2int(x) for x in x_coords], [coord2int(x) for x in y_coords]\n\n\ndef contained(x, y, x_coords, y_coords):\n return inside_polygon(x, y, [x_coords, y_coords])\n\n\ndef unique(iterable):\n out = []\n for i in iterable:\n if i not in out:\n out.append(i)\n return out\n\n\ndef point_between(p1, p2):\n return p1[0] + (p2[0] - p1[0]) / 2, p1[1] + (p2[1] - p1[1]) / 2\n\n\ndef get_shortcuts(x, y):\n result = shortcuts.get((x, y))\n if result is None:\n return []\n else:\n return result\n\n\ndef _polygons(id_list):\n for i in id_list:\n yield all_coords[i]\n\n\ndef not_empty(iterable):\n for i in iterable:\n return True\n return False\n\n\ndef polys_of_one_zone():\n for i in range(len(timezone_names)):\n start = poly_nr2zone_id[i]\n end = poly_nr2zone_id[i + 1]\n yield list(range(start, end))\n\n\ndef replace_entry(iterable, entry, substitute):\n for i in range(len(iterable)):\n if iterable[i] == entry:\n iterable[i] = substitute\n return iterable\n\n\ndef _holes_in_poly(poly_nr):\n i = 0\n for nr in polynrs_of_holes:\n if nr == poly_nr:\n yield all_holes[i]\n i += 1\n\n\ndef parse_polygons_from_json(path=INPUT_JSON_FILE_NAME):\n global amount_of_holes\n global nr_of_lines\n global poly_zone_ids\n\n print('Parsing data from {}\\nthis could take a 
while...\\n'.format(path))\n tz_list = json.loads(open(path).read()).get('features')\n # this counter just counts polygons, not holes!\n polygon_counter = 0\n current_zone_id = 0\n print('holes found at: (poly_nr zone_name)')\n for tz_dict in tz_list:\n if DEBUG and polygon_counter > DEBUG_POLY_STOP:\n break\n\n tz_name = tz_dict.get('properties').get(\"tzid\")\n # print(tz_name)\n all_tz_names.append(tz_name)\n geometry = tz_dict.get(\"geometry\")\n if geometry.get('type') == 'MultiPolygon':\n # depth is 4\n multipolygon = geometry.get(\"coordinates\")\n else:\n # depth is 3 (only one polygon, possibly with holes!)\n multipolygon = [geometry.get(\"coordinates\")]\n # multipolygon has depth 4\n # assert depth_of_array(multipolygon) == 4\n for poly_with_hole in multipolygon:\n # assert len(poly_with_hole) > 0\n # the first entry is polygon\n x_coords, y_coords = list(zip(*poly_with_hole.pop(0)))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_coords.append((x_coords, y_coords))\n # assert len(x_coords) > 0\n all_lengths.append(len(x_coords))\n all_boundaries.append((max(x_coords), min(x_coords), max(y_coords), min(y_coords)))\n poly_zone_ids.append(current_zone_id)\n\n # everything else is interpreted as a hole!\n for hole in poly_with_hole:\n print(polygon_counter, tz_name)\n # keep track of how many holes there are\n amount_of_holes += 1\n polynrs_of_holes.append(polygon_counter)\n x_coords, y_coords = list(zip(*hole))\n # IMPORTANT: do not use the last value (is equal to the first)!\n x_coords = list(x_coords)\n y_coords = list(y_coords)\n x_coords.pop(-1)\n y_coords.pop(-1)\n all_holes.append((x_coords, y_coords))\n all_hole_lengths.append(len(x_coords))\n\n polygon_counter += 1\n\n current_zone_id += 1\n\n if max(all_lengths) >= 2 ** (8 * NR_BYTES_I):\n # 34621 in tz_world 2016d (small enough for int16)\n # 137592 in 
evansiroky/timezone-boundary-builder 2017a (now int32 is needed!)\n raise ValueError('amount of coords cannot be represented by int32 in poly_coord_amount.bin:',\n max(all_lengths))\n\n if max(all_hole_lengths) >= 2 ** (8 * NR_BYTES_H):\n # 21071 in evansiroky/timezone-boundary-builder 2017a (int16 still enough)\n raise ValueError('amount of coords cannot be represented by short (int16) in hole_coord_amount.bin:',\n max(all_hole_lengths))\n\n nr_of_lines = len(all_lengths)\n if polygon_counter != nr_of_lines:\n raise ValueError('polygon counter and entry number in all_length is different:', polygon_counter, nr_of_lines)\n\n if nr_of_lines >= 2 ** (8 * NR_BYTES_H):\n # 24k in tz_world 2016d\n # 1022 in evansiroky/timezone-boundary-builder 2017a\n raise ValueError('polygon id cannot be encoded as short (int16) in hole_coord_amount.bin! there are',\n nr_of_lines, 'polygons')\n\n if poly_zone_ids[-1] > 2 ** (8 * NR_BYTES_H):\n # 420 different zones in evansiroky/timezone-boundary-builder 2017a\n # used in shortcuts_unique_id and poly_zone_ids\n raise ValueError('zone id cannot be encoded as char (int8). the last id is',\n poly_zone_ids[-1])\n\n if 0 in all_lengths:\n raise ValueError()\n\n print('... 
parsing done.')\n print('maximal amount of coordinates in one polygon:', max(all_lengths))\n print('amount_of_holes:', amount_of_holes)\n print('amount of polygons:', nr_of_lines)\n print('\\n')\n\n\ndef update_zone_names(path=TIMEZONE_NAMES_FILE):\n global poly_zone_ids\n global list_of_pointers\n global all_boundaries\n global all_coords\n global all_lengths\n global polynrs_of_holes\n print('updating the zone names in {} now...'.format(path))\n # pickle the zone names (python array)\n with open(abspath(path), 'w') as f:\n f.write(json.dumps(all_tz_names))\n print('...Done.\\n\\nComputing where zones start and end...')\n i = 0\n last_id = -1\n for zone_id in poly_zone_ids:\n if zone_id != last_id:\n poly_nr2zone_id.append(i)\n if zone_id < last_id:\n raise ValueError()\n last_id = zone_id\n i += 1\n poly_nr2zone_id.append(i)\n print('...Done.\\n')\n\n\ndef compile_binaries():\n global nr_of_lines\n global shortcuts\n\n def print_shortcut_statistics():\n frequencies = []\n max_val = max(*nr_of_entries_in_shortcut)\n print('shortcut statistics:')\n print('highest entry amount is', max_val)\n while max_val >= 0:\n frequencies.append(nr_of_entries_in_shortcut.count(max_val))\n max_val -= 1\n\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max entries):')\n print(frequencies)\n empty_shortcuts = frequencies[0]\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print(percent(empty_shortcuts, amount_of_shortcuts), '% of all shortcuts are empty\\n')\n\n amount_of_different_zones = []\n for entry in shortcut_entries:\n registered_zone_ids = []\n for polygon_nr in entry:\n id = poly_zone_ids[polygon_nr]\n if id not in registered_zone_ids:\n registered_zone_ids.append(id)\n\n amount_of_different_zones.append(len(registered_zone_ids))\n\n frequencies = []\n max_val = max(*amount_of_different_zones)\n print('highest amount of different zones in one 
shortcut is', max_val)\n while max_val >= 1:\n frequencies.append(amount_of_different_zones.count(max_val))\n max_val -= 1\n # show the proper amount of shortcuts with 0 zones (=nr of empty shortcuts)\n frequencies.append(empty_shortcuts)\n frequencies.reverse()\n print('frequencies of entry amounts (from 0 to max):')\n print(frequencies)\n print('relative accumulated frequencies [%]:')\n acc = accumulated_frequency(frequencies)\n print(acc)\n print([round(100 - x, 2) for x in acc])\n print('--------------------------------\\n')\n\n def included_shortcut_row_nrs(max_lat, min_lat):\n return list(range(y_shortcut(max_lat), y_shortcut(min_lat) + 1))\n\n def included_shortcut_column_nrs(max_lng, min_lng):\n return list(range(x_shortcut(min_lng), x_shortcut(max_lng) + 1))\n\n def longitudes_to_check(max_lng, min_lng):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LNG\n current = ceil(min_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n end = floor(max_lng * NR_SHORTCUTS_PER_LNG) / NR_SHORTCUTS_PER_LNG\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def latitudes_to_check(max_lat, min_lat):\n output_list = []\n step = 1 / NR_SHORTCUTS_PER_LAT\n current = ceil(min_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n end = floor(max_lat * NR_SHORTCUTS_PER_LAT) / NR_SHORTCUTS_PER_LAT\n while current < end:\n output_list.append(current)\n current += step\n\n output_list.append(end)\n return output_list\n\n def compute_x_intersection(y, x1, x2, y1, y2):\n \"\"\"returns the x intersection from a horizontal line in y with the line from x1,y1 to x1,y2\n \"\"\"\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return ((y - y1) * (x2 - x1) / delta_y) + x1\n\n def compute_y_intersection(x, x1, x2, y1, y2):\n \"\"\"returns the y intersection from a vertical line in x with the line from x1,y1 to x1,y2\n \"\"\"\n delta_x = x2 - x1\n if delta_x == 0:\n return x1\n return ((x - x1) * (y2 - y1) / 
delta_x) + y1\n\n def x_intersections(y, x_coords, y_coords):\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if y_coords[i] <= y:\n # print('Y1<=y')\n if y_coords[iplus1] > y:\n # this was a crossing. compute the intersect\n # print('Y2>y')\n intersects.append(\n compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n # print('Y1>y')\n if y_coords[iplus1] <= y:\n # this was a crossing. compute the intersect\n # print('Y2<=y')\n intersects.append(compute_x_intersection(y, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def y_intersections(x, x_coords, y_coords):\n\n intersects = []\n for i in range(len(y_coords) - 1):\n iplus1 = i + 1\n if x_coords[i] <= x:\n if x_coords[iplus1] > x:\n # this was a crossing. compute the intersect\n intersects.append(\n compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i], y_coords[iplus1]))\n else:\n if x_coords[iplus1] <= x:\n # this was a crossing. 
compute the intersect\n intersects.append(compute_y_intersection(x, x_coords[i], x_coords[iplus1], y_coords[i],\n y_coords[iplus1]))\n return intersects\n\n def compute_exact_shortcuts(xmax, xmin, ymax, ymin, line):\n shortcuts_for_line = set()\n\n # x_longs = binary_reader.x_coords_of(line)\n x_longs, y_longs = ints_of(line)\n\n # y_longs = binary_reader.y_coords_of(line)\n y_longs.append(y_longs[0])\n x_longs.append(x_longs[0])\n\n step = 1 / NR_SHORTCUTS_PER_LAT\n # print('checking the latitudes')\n for lat in latitudes_to_check(ymax, ymin):\n # print(lat)\n # print(coordinate_to_longlong(lat))\n # print(y_longs)\n # print(x_intersections(coordinate_to_longlong(lat), x_longs, y_longs))\n # raise ValueError\n intersects = sorted([int2coord(x) for x in\n x_intersections(coord2int(lat), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n for i in range(0, nr_of_intersects, 2):\n possible_longitudes = []\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut zone here!\n # only select the top shortcut if it is actually inside the polygon (point a little up is inside)\n if contained(coord2int(intersection_in), coord2int(lat) + 1, x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat) - 1))\n # the bottom shortcut is always selected\n shortcuts_for_line.add((x_shortcut(intersection_in), y_shortcut(lat)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_y_shortcut = y_shortcut(lat)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if 
contained(coord2int(middle), coord2int(lat) + 1, x_longs,\n y_longs):\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # the shortcut above and below of the intersection should be selected!\n possible_y_shortcut_min1 = possible_y_shortcut - 1\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut_min1))\n else:\n # polygon does not cross the border!\n while intersection_in < intersection_out:\n possible_longitudes.append(intersection_in)\n intersection_in += step\n\n possible_longitudes.append(intersection_out)\n\n # only the shortcut above of the intersection should be selected!\n for possible_x_coord in possible_longitudes:\n shortcuts_for_line.add((x_shortcut(possible_x_coord), possible_y_shortcut))\n\n # print('now all the longitudes to check')\n # same procedure horizontally\n step = 1 / NR_SHORTCUTS_PER_LAT\n for lng in longitudes_to_check(xmax, xmin):\n # print(lng)\n # print(coordinate_to_longlong(lng))\n # print(x_longs)\n # print(x_intersections(coordinate_to_longlong(lng), x_longs, y_longs))\n intersects = sorted([int2coord(y) for y in\n y_intersections(coord2int(lng), x_longs, y_longs)])\n # print(intersects)\n\n nr_of_intersects = len(intersects)\n if nr_of_intersects % 2 != 0:\n raise ValueError('an uneven number of intersections has been accounted')\n\n possible_latitudes = []\n for i in range(0, nr_of_intersects, 2):\n # collect all the zones between two intersections [in,out,in,out,...]\n iplus = i + 1\n intersection_in = intersects[i]\n intersection_out = intersects[iplus]\n if intersection_in == intersection_out:\n # the polygon has a point exactly on the border of a shortcut here!\n # only select the left shortcut if it is actually inside the polygon (point a little left is inside)\n 
if contained(coord2int(lng) - 1, coord2int(intersection_in), x_longs,\n y_longs):\n shortcuts_for_line.add((x_shortcut(lng) - 1, y_shortcut(intersection_in)))\n # the right shortcut is always selected\n shortcuts_for_line.add((x_shortcut(lng), y_shortcut(intersection_in)))\n\n else:\n # add all the shortcuts for the whole found area of intersection\n possible_x_shortcut = x_shortcut(lng)\n\n # both shortcuts should only be selected when the polygon doesnt stays on the border\n middle = intersection_in + (intersection_out - intersection_in) / 2\n if contained(coord2int(lng) - 1, coord2int(middle), x_longs,\n y_longs):\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n\n possible_latitudes.append(intersection_out)\n\n # both shortcuts right and left of the intersection should be selected!\n possible_x_shortcut_min1 = possible_x_shortcut - 1\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n shortcuts_for_line.add((possible_x_shortcut_min1, y_shortcut(possible_latitude)))\n\n else:\n while intersection_in < intersection_out:\n possible_latitudes.append(intersection_in)\n intersection_in += step\n # only the shortcut right of the intersection should be selected!\n possible_latitudes.append(intersection_out)\n\n for possible_latitude in possible_latitudes:\n shortcuts_for_line.add((possible_x_shortcut, y_shortcut(possible_latitude)))\n\n return shortcuts_for_line\n\n def construct_shortcuts():\n print('building shortucts...')\n print('currently at polygon nr:')\n line = 0\n for xmax, xmin, ymax, ymin in all_boundaries:\n # xmax, xmin, ymax, ymin = boundaries_of(line=line)\n if line % 100 == 0:\n print(line)\n # print([xmax, xmin, ymax, ymin])\n\n column_nrs = included_shortcut_column_nrs(xmax, xmin)\n row_nrs = included_shortcut_row_nrs(ymax, ymin)\n\n if big_zone(xmax, xmin, ymax, ymin):\n\n # print('line ' + str(line))\n # 
print('This is a big zone! computing exact shortcuts')\n # print('Nr of entries before')\n # print(len(column_nrs) * len(row_nrs))\n # print('columns and rows before optimisation:')\n # print(column_nrs)\n # print(row_nrs)\n # print(ints_of(line))\n\n # This is a big zone! compute exact shortcuts with the whole polygon points\n shortcuts_for_line = compute_exact_shortcuts(xmax, xmin, ymax, ymin, line)\n # n += len(shortcuts_for_line)\n\n min_x_shortcut = column_nrs[0]\n max_x_shortcut = column_nrs[-1]\n min_y_shortcut = row_nrs[0]\n max_y_shortcut = row_nrs[-1]\n shortcuts_to_remove = []\n\n # remove shortcuts from outside the possible/valid area\n for x, y in shortcuts_for_line:\n if x < min_x_shortcut or x > max_x_shortcut or y < min_y_shortcut or y > max_y_shortcut:\n shortcuts_to_remove.append((x, y))\n\n for s in shortcuts_to_remove:\n shortcuts_for_line.remove(s)\n\n # print('and after:')\n # print(len(shortcuts_for_line))\n # print(shortcuts_for_line)\n # column_nrs_after = set()\n # row_nrs_after = set()\n # for x, y in shortcuts_for_line:\n # column_nrs_after.add(x)\n # row_nrs_after.add(y)\n # print(column_nrs_after)\n # print(row_nrs_after)\n # print(shortcuts_for_line)\n\n if len(shortcuts_for_line) > len(column_nrs) * len(row_nrs):\n raise ValueError(\n 'there are more shortcuts than before now. there is something wrong with the algorithm!')\n if len(shortcuts_for_line) < 3:\n raise ValueError('algorithm not valid! 
less than 3 zones detected (should be at least 3)')\n\n else:\n shortcuts_for_line = []\n for column_nr in column_nrs:\n for row_nr in row_nrs:\n shortcuts_for_line.append((column_nr, row_nr))\n # print(shortcuts_for_line)\n\n for shortcut in shortcuts_for_line:\n shortcuts[shortcut] = shortcuts.get(shortcut, []) + [line]\n\n line += 1\n # print('collected entries:')\n # print(n)\n\n start_time = datetime.now()\n construct_shortcuts()\n end_time = datetime.now()\n print('calculating the shortcuts took:', end_time - start_time, '\\n')\n\n # there are two floats per coordinate (lng, lat)\n nr_of_floats = 2 * sum(all_lengths)\n\n # write number of entries in shortcut field (x,y)\n nr_of_entries_in_shortcut = []\n shortcut_entries = []\n amount_filled_shortcuts = 0\n\n def sort_poly_shortcut(poly_nrs):\n # TODO write test\n # the list of polygon ids in each shortcut is sorted after freq. of appearance of their zone id\n # this is critical for ruling out zones faster\n # (as soon as just polygons of one zone are left this zone can be returned)\n # only around 5% of all shortcuts include polygons from more than one zone\n # in most of those cases there are only two types of zones (= entries in counted_zones) and one of them\n # has only one entry (important to check the zone with one entry first!).\n polygon_ids = [poly_zone_ids[poly_nr] for poly_nr in poly_nrs]\n id_freq = [polygon_ids.count(id) for id in polygon_ids]\n zipped = list(zip(poly_nrs, polygon_ids, id_freq))\n # also make sure polygons with the same zone freq. 
are ordered after their zone id\n # (polygons from different zones should not get mixed up)\n sort = sorted((sorted(zipped, key=lambda x: x[1])), key=lambda x: x[2])\n return [x[0] for x in sort] # take only the polygon nrs\n\n # count how many shortcut addresses will be written:\n # flatten out the shortcuts in one list in the order they are going to be written inside the polygon file\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n shortcut_entries.append(sort_poly_shortcut(shortcuts_this_entry))\n amount_filled_shortcuts += 1\n nr_of_entries_in_shortcut.append(len(shortcuts_this_entry))\n # print((x,y,this_lines_shortcuts))\n except KeyError:\n nr_of_entries_in_shortcut.append(0)\n\n amount_of_shortcuts = len(nr_of_entries_in_shortcut)\n print_shortcut_statistics()\n\n if amount_of_shortcuts != 360 * 180 * NR_SHORTCUTS_PER_LNG * NR_SHORTCUTS_PER_LAT:\n print(amount_of_shortcuts)\n raise ValueError('this number of shortcut zones is wrong')\n\n print('The number of filled shortcut zones are:', amount_filled_shortcuts, '(=',\n round((amount_filled_shortcuts / amount_of_shortcuts) * 100, 2), '% of all shortcuts)')\n\n # for every shortcut <H and <I is written (nr of entries and address)\n shortcut_space = 360 * NR_SHORTCUTS_PER_LNG * 180 * NR_SHORTCUTS_PER_LAT * (NR_BYTES_H + NR_BYTES_I)\n for nr in nr_of_entries_in_shortcut:\n # every line in every shortcut takes up 2bytes\n shortcut_space += NR_BYTES_H * nr\n\n print('The number of polygons is:', nr_of_lines)\n print('The number of floats in all the polygons is (2 per point):', nr_of_floats)\n\n path = 'poly_nr2zone_id.bin'\n print('writing file', path)\n output_file = open(path, 'wb')\n for zone_id in poly_nr2zone_id:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n print('Done\\n')\n # write zone_ids\n path = 'poly_zone_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 
'wb')\n for zone_id in poly_zone_ids:\n output_file.write(pack(b'<H', zone_id))\n output_file.close()\n\n # write boundary_data\n path = 'poly_max_values.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for xmax, xmin, ymax, ymin in all_boundaries:\n output_file.write(pack(b'<iiii', coord2int(xmax), coord2int(xmin), coord2int(ymax), coord2int(ymin)))\n output_file.close()\n\n # write polygon_data, addresses and number of values\n path = 'poly_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n addresses = []\n i = 0\n for x_coords, y_coords in all_coords:\n addresses.append(output_file.tell())\n if all_lengths[i] != len(x_coords):\n raise ValueError('x_coords do not have the expected length!', all_lengths[i], len(x_coords))\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n i += 1\n output_file.close()\n\n path = 'poly_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for adr in addresses:\n output_file.write(pack(b'<I', adr))\n output_file.close()\n\n path = 'poly_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_lengths:\n output_file.write(pack(b'<I', length))\n output_file.close()\n\n # [SHORTCUT AREA]\n # write all nr of entries\n path = 'shortcuts_entry_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr > 300:\n raise ValueError(\"There are too many polygons in this shortcut:\", nr)\n output_file.write(pack(b'<H', nr))\n output_file.close()\n\n # write Address of first Polygon_nr in shortcut field (x,y)\n # Attention: 0 is written when no entries are in this shortcut\n adr = 0\n path = 'shortcuts_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for nr in nr_of_entries_in_shortcut:\n if nr == 0:\n 
output_file.write(pack(b'<I', 0))\n else:\n output_file.write(pack(b'<I', adr))\n # each line_nr takes up 2 bytes of space\n adr += 2 * nr\n output_file.close()\n\n # write Line_Nrs for every shortcut\n path = 'shortcuts_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for entries in shortcut_entries:\n for entry in entries:\n if entry > nr_of_lines:\n raise ValueError(entry)\n output_file.write(pack(b'<H', entry))\n output_file.close()\n\n # write corresponding zone id for every shortcut (iff unique)\n path = 'shortcuts_unique_id.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n if poly_zone_ids[-1] >= INVALID_ZONE_ID:\n raise ValueError(\n 'There are too many zones for this data type (H). The shortcuts_unique_id file need a Invalid Id!')\n for x in range(360 * NR_SHORTCUTS_PER_LNG):\n for y in range(180 * NR_SHORTCUTS_PER_LAT):\n try:\n shortcuts_this_entry = shortcuts[(x, y)]\n unique_id = poly_zone_ids[shortcuts_this_entry[0]]\n for nr in shortcuts_this_entry:\n if poly_zone_ids[nr] != unique_id:\n # there is a polygon from a different zone (hence an invalid id should be written)\n unique_id = INVALID_ZONE_ID\n break\n output_file.write(pack(b'<H', unique_id))\n except KeyError:\n # also write an Invalid Id when there is no polygon at all\n output_file.write(pack(b'<H', INVALID_ZONE_ID))\n\n output_file.close()\n # [HOLE AREA, Y = number of holes (very few: around 22)]\n hole_space = 0\n\n # '<H' for every hole store the related line\n path = 'hole_poly_ids.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n i = 0\n for line in polynrs_of_holes:\n if line > nr_of_lines:\n raise ValueError(line, nr_of_lines)\n output_file.write(pack(b'<H', line))\n i += 1\n hole_space += output_file.tell()\n output_file.close()\n\n if i > amount_of_holes:\n raise ValueError('There are more related lines than holes.')\n\n # '<H' Y times [H unsigned short: nr of values (coordinate PAIRS! 
x,y in int32 int32) in this hole]\n path = 'hole_coord_amount.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<H', length))\n hole_space += output_file.tell()\n output_file.close()\n\n # '<I' Y times [ I unsigned int: absolute address of the byte where the data of that hole starts]\n adr = 0\n path = 'hole_adr2data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for length in all_hole_lengths:\n output_file.write(pack(b'<I', adr))\n # each pair of points takes up 8 bytes of space\n adr += 2 * NR_BYTES_I * length\n hole_space += output_file.tell()\n output_file.close()\n\n # Y times [ 2x i signed ints for every hole: x coords, y coords ]\n # write hole polygon_data\n path = 'hole_data.bin'\n print('writing file \"', path, '\"')\n output_file = open(path, 'wb')\n for x_coords, y_coords in all_holes:\n for x in x_coords:\n output_file.write(pack(b'<i', coord2int(x)))\n for y in y_coords:\n output_file.write(pack(b'<i', coord2int(y)))\n hole_space += output_file.tell()\n output_file.close()\n\n polygon_space = nr_of_floats * NR_BYTES_I\n total_space = polygon_space + hole_space + shortcut_space\n\n print('the polygon data makes up', percent(polygon_space, total_space), '% of the data')\n print('the shortcuts make up', percent(shortcut_space, total_space), '% of the data')\n print('holes make up', percent(hole_space, total_space), '% of the data')\n print('Success!')\n return\n\n\nif __name__ == '__main__':\n # parsing the data from the .json into RAM\n parse_polygons_from_json(path=INPUT_JSON_FILE_NAME)\n # update all the zone names and set the right ids to be written in the poly_zone_ids.bin\n # sort data according to zone_id\n update_zone_names(path=TIMEZONE_NAMES_FILE)\n\n # IMPORTANT: import the newly compiled timezone_names pickle!\n # the compilation process needs the new version of the timezone names\n with open(abspath(join(__file__, pardir, 
TIMEZONE_NAMES_FILE)), 'r') as f:\n timezone_names = json.loads(f.read())\n\n # compute shortcuts and write everything into the binaries\n compile_binaries()\n",
"step-ids": [
11,
13,
15,
17,
22
]
}
|
[
11,
13,
15,
17,
22
] |
<|reserved_special_token_0|>
def findIris(eyeMask, im, thresh):
r = im[:, :, 2]
_, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
morph = cv2.dilate(binaryIm, kernel, 1)
morph = cv2.merge((morph, morph, morph))
morph = morph.astype(float) / 255
eyeMask = eyeMask.astype(float) / 255
iris = cv2.multiply(eyeMask, morph)
return iris
def findCentroid(iris):
M = cv2.moments(iris[:, :, 0])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
centroid = cX, cY
return centroid
<|reserved_special_token_0|>
def changeEyeColor(im, irisMask, inverseIrisMask):
imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))
imCopy = imCopy.astype(float) / 255
irisMask = irisMask.astype(float) / 255
inverseIrisMask = inverseIrisMask.astype(float) / 255
im = im.astype(float) / 255
faceWithoutEye = cv2.multiply(inverseIrisMask, im)
newIris = cv2.multiply(irisMask, imCopy)
result = faceWithoutEye + newIris
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def createEyeMask(eyeLandmarks, im):
leftEyePoints = eyeLandmarks
eyeMask = np.zeros_like(im)
cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))
eyeMask = np.uint8(eyeMask)
return eyeMask
def findIris(eyeMask, im, thresh):
r = im[:, :, 2]
_, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
morph = cv2.dilate(binaryIm, kernel, 1)
morph = cv2.merge((morph, morph, morph))
morph = morph.astype(float) / 255
eyeMask = eyeMask.astype(float) / 255
iris = cv2.multiply(eyeMask, morph)
return iris
def findCentroid(iris):
M = cv2.moments(iris[:, :, 0])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
centroid = cX, cY
return centroid
def createIrisMask(iris, centroid, rad):
cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
flag = 10000
final_cnt = None
for cnt in cnts:
(x, y), radius = cv2.minEnclosingCircle(cnt)
distance = abs(centroid[0] - x) + abs(centroid[1] - y)
if distance < flag:
flag = distance
final_cnt = cnt
else:
continue
(x, y), radius = cv2.minEnclosingCircle(final_cnt)
center = int(x), int(y)
center = centroid
radius = rad // 2 + 2
print(radius)
irisMask = np.zeros_like(iris)
inverseIrisMask = np.ones_like(iris) * 255
cv2.circle(irisMask, center, radius, (255, 255, 255), -1)
cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)
return irisMask, inverseIrisMask
def changeEyeColor(im, irisMask, inverseIrisMask):
imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))
imCopy = imCopy.astype(float) / 255
irisMask = irisMask.astype(float) / 255
inverseIrisMask = inverseIrisMask.astype(float) / 255
im = im.astype(float) / 255
faceWithoutEye = cv2.multiply(inverseIrisMask, im)
newIris = cv2.multiply(irisMask, imCopy)
result = faceWithoutEye + newIris
return result
def float642Uint8(im):
im2Convert = im.astype(np.float64) / np.amax(im)
im2Convert = 255 * im2Convert
convertedIm = im2Convert.astype(np.uint8)
return convertedIm
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
rad = (A + B) / 2
return int(rad)
<|reserved_special_token_0|>
def createEyeMask(eyeLandmarks, im):
leftEyePoints = eyeLandmarks
eyeMask = np.zeros_like(im)
cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))
eyeMask = np.uint8(eyeMask)
return eyeMask
def findIris(eyeMask, im, thresh):
r = im[:, :, 2]
_, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
morph = cv2.dilate(binaryIm, kernel, 1)
morph = cv2.merge((morph, morph, morph))
morph = morph.astype(float) / 255
eyeMask = eyeMask.astype(float) / 255
iris = cv2.multiply(eyeMask, morph)
return iris
def findCentroid(iris):
M = cv2.moments(iris[:, :, 0])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
centroid = cX, cY
return centroid
def createIrisMask(iris, centroid, rad):
cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
flag = 10000
final_cnt = None
for cnt in cnts:
(x, y), radius = cv2.minEnclosingCircle(cnt)
distance = abs(centroid[0] - x) + abs(centroid[1] - y)
if distance < flag:
flag = distance
final_cnt = cnt
else:
continue
(x, y), radius = cv2.minEnclosingCircle(final_cnt)
center = int(x), int(y)
center = centroid
radius = rad // 2 + 2
print(radius)
irisMask = np.zeros_like(iris)
inverseIrisMask = np.ones_like(iris) * 255
cv2.circle(irisMask, center, radius, (255, 255, 255), -1)
cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)
return irisMask, inverseIrisMask
def changeEyeColor(im, irisMask, inverseIrisMask):
imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))
imCopy = imCopy.astype(float) / 255
irisMask = irisMask.astype(float) / 255
inverseIrisMask = inverseIrisMask.astype(float) / 255
im = im.astype(float) / 255
faceWithoutEye = cv2.multiply(inverseIrisMask, im)
newIris = cv2.multiply(irisMask, imCopy)
result = faceWithoutEye + newIris
return result
def float642Uint8(im):
im2Convert = im.astype(np.float64) / np.amax(im)
im2Convert = 255 * im2Convert
convertedIm = im2Convert.astype(np.uint8)
return convertedIm
<|reserved_special_token_0|>
cv2.imwrite('3.jpg', coloredEyesLady)
<|reserved_special_token_1|>
import cv2
import dlib
import faceBlendCommon as face
from matplotlib import pyplot as plt
from scipy.spatial import distance as dist
import numpy as np
import cmapy
import math
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
rad = (A + B) / 2
return int(rad)
im = cv2.imread('imgs/2.jpg')
PREDICTOR_PATH = './model/shape_predictor_68_face_landmarks.dat'
faceDetector = dlib.get_frontal_face_detector()
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)
landmarks = face.getLandmarks(faceDetector, landmarkDetector, im)
def createEyeMask(eyeLandmarks, im):
leftEyePoints = eyeLandmarks
eyeMask = np.zeros_like(im)
cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))
eyeMask = np.uint8(eyeMask)
return eyeMask
def findIris(eyeMask, im, thresh):
r = im[:, :, 2]
_, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
morph = cv2.dilate(binaryIm, kernel, 1)
morph = cv2.merge((morph, morph, morph))
morph = morph.astype(float) / 255
eyeMask = eyeMask.astype(float) / 255
iris = cv2.multiply(eyeMask, morph)
return iris
def findCentroid(iris):
M = cv2.moments(iris[:, :, 0])
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
centroid = cX, cY
return centroid
def createIrisMask(iris, centroid, rad):
cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
flag = 10000
final_cnt = None
for cnt in cnts:
(x, y), radius = cv2.minEnclosingCircle(cnt)
distance = abs(centroid[0] - x) + abs(centroid[1] - y)
if distance < flag:
flag = distance
final_cnt = cnt
else:
continue
(x, y), radius = cv2.minEnclosingCircle(final_cnt)
center = int(x), int(y)
center = centroid
radius = rad // 2 + 2
print(radius)
irisMask = np.zeros_like(iris)
inverseIrisMask = np.ones_like(iris) * 255
cv2.circle(irisMask, center, radius, (255, 255, 255), -1)
cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)
return irisMask, inverseIrisMask
def changeEyeColor(im, irisMask, inverseIrisMask):
imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))
imCopy = imCopy.astype(float) / 255
irisMask = irisMask.astype(float) / 255
inverseIrisMask = inverseIrisMask.astype(float) / 255
im = im.astype(float) / 255
faceWithoutEye = cv2.multiply(inverseIrisMask, im)
newIris = cv2.multiply(irisMask, imCopy)
result = faceWithoutEye + newIris
return result
def float642Uint8(im):
im2Convert = im.astype(np.float64) / np.amax(im)
im2Convert = 255 * im2Convert
convertedIm = im2Convert.astype(np.uint8)
return convertedIm
leftEyeMask = createEyeMask(landmarks[36:42], im)
rightEyeMask = createEyeMask(landmarks[42:48], im)
leftIris = findIris(leftEyeMask, im, 100)
rightIris = findIris(rightEyeMask, im, 50)
leftIrisCentroid = findCentroid(leftIris)
rightIrisCentroid = findCentroid(rightIris)
rad_left = eye_aspect_ratio(landmarks[36:42])
rad_right = eye_aspect_ratio(landmarks[42:48])
rightIrisMask, rightInverseIrisMask = createIrisMask(rightIris,
rightIrisCentroid, rad_right)
leftIrisMask, leftInverseIrisMask = createIrisMask(leftIris,
leftIrisCentroid, rad_left)
coloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)
coloredEyesLady = float642Uint8(coloredEyesLady)
coloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask,
leftInverseIrisMask)
coloredEyesLady = float642Uint8(coloredEyesLady)
cv2.imwrite('3.jpg', coloredEyesLady)
<|reserved_special_token_1|>
import cv2
import dlib
import faceBlendCommon as face
from matplotlib import pyplot as plt
from scipy.spatial import distance as dist
import numpy as np
import cmapy
import math
def eye_aspect_ratio(eye):
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
C = dist.euclidean(eye[0], eye[3])
rad=(A+B)/2
return int(rad)
# Load Image
im = cv2.imread("imgs/2.jpg")
# Detect face landmarks
PREDICTOR_PATH = r"./model/shape_predictor_68_face_landmarks.dat"
faceDetector = dlib.get_frontal_face_detector()
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)
landmarks = face.getLandmarks(faceDetector, landmarkDetector, im)
def createEyeMask(eyeLandmarks, im):
leftEyePoints = eyeLandmarks
eyeMask = np.zeros_like(im)
cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))
eyeMask = np.uint8(eyeMask)
return eyeMask
def findIris(eyeMask, im, thresh):
r = im[:,:,2]
_, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4))
morph = cv2.dilate(binaryIm, kernel, 1)
morph = cv2.merge((morph, morph, morph))
morph = morph.astype(float)/255
eyeMask = eyeMask.astype(float)/255
iris = cv2.multiply(eyeMask, morph)
return iris
def findCentroid(iris):
M = cv2.moments(iris[:,:,0])
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
centroid = (cX,cY)
return centroid
def createIrisMask(iris, centroid,rad):
cnts, _ = cv2.findContours(np.uint8(iris[:,:,0]), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
flag = 10000
final_cnt = None
for cnt in cnts:
(x,y),radius = cv2.minEnclosingCircle(cnt)
distance = abs(centroid[0]-x)+abs(centroid[1]-y)
if distance < flag :
flag = distance
final_cnt = cnt
else:
continue
(x,y),radius = cv2.minEnclosingCircle(final_cnt)
center = (int(x),int(y))
center = centroid
# radius = int(radius-(radius//4))
radius=(rad//2)+2
print(radius)
irisMask = np.zeros_like(iris)
inverseIrisMask = np.ones_like(iris)*255
cv2.circle(irisMask,center,radius,(255, 255, 255),-1)
cv2.circle(inverseIrisMask,center,radius,(0, 0, 0),-1)
# irisMask = cv2.GaussianBlur(irisMask, (5,5), cv2.BORDER_DEFAULT)
# inverseIrisMask = cv2.GaussianBlur(inverseIrisMask, (5,5), cv2.BORDER_DEFAULT)
return irisMask, inverseIrisMask
def changeEyeColor(im, irisMask, inverseIrisMask):
imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))
imCopy = imCopy.astype(float)/255
irisMask = irisMask.astype(float)/255
inverseIrisMask = inverseIrisMask.astype(float)/255
im = im.astype(float)/255
faceWithoutEye = cv2.multiply(inverseIrisMask, im)
newIris = cv2.multiply(irisMask, imCopy)
result = faceWithoutEye + newIris
return result
def float642Uint8(im):
im2Convert = im.astype(np.float64) / np.amax(im)
im2Convert = 255 * im2Convert
convertedIm = im2Convert.astype(np.uint8)
return convertedIm
# Create eye mask using eye landmarks from facial landmark detection
leftEyeMask = createEyeMask(landmarks[36:42], im)
rightEyeMask = createEyeMask(landmarks[42:48], im)
# Find the iris by thresholding the red channel of the image within the boundaries of the eye mask
leftIris = findIris(leftEyeMask, im, 100)
rightIris = findIris(rightEyeMask, im, 50)
# Find the centroid of the binary image of the eye
leftIrisCentroid = findCentroid(leftIris)
rightIrisCentroid = findCentroid(rightIris)
# Generate the iris mask and its inverse mask
rad_left=eye_aspect_ratio(landmarks[36:42])
rad_right=eye_aspect_ratio(landmarks[42:48])
rightIrisMask, rightInverseIrisMask = createIrisMask(rightIris, rightIrisCentroid,rad_right)
leftIrisMask, leftInverseIrisMask = createIrisMask(leftIris, leftIrisCentroid,rad_left)
# Change the eye color and merge it to the original image
coloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)
coloredEyesLady = float642Uint8(coloredEyesLady)
coloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask, leftInverseIrisMask)
coloredEyesLady = float642Uint8(coloredEyesLady)
# Present results
cv2.imwrite("3.jpg", coloredEyesLady)
|
flexible
|
{
"blob_id": "65ff3b5137c94890c3293a2ae3f57dee1f60a54c",
"index": 9097,
"step-1": "<mask token>\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\n<mask token>\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return 
result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad = (A + B) / 2\n return int(rad)\n\n\n<mask token>\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = 
inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n<mask token>\ncv2.imwrite('3.jpg', coloredEyesLady)\n",
"step-4": "import cv2\nimport dlib\nimport faceBlendCommon as face\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport cmapy\nimport math\n\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad = (A + B) / 2\n return int(rad)\n\n\nim = cv2.imread('imgs/2.jpg')\nPREDICTOR_PATH = './model/shape_predictor_68_face_landmarks.dat'\nfaceDetector = dlib.get_frontal_face_detector()\nlandmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)\nlandmarks = face.getLandmarks(faceDetector, landmarkDetector, im)\n\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\n\ndef findIris(eyeMask, im, thresh):\n r = im[:, :, 2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float) / 255\n eyeMask = eyeMask.astype(float) / 255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:, :, 0])\n cX = int(M['m10'] / M['m00'])\n cY = int(M['m01'] / M['m00'])\n centroid = cX, cY\n return centroid\n\n\ndef createIrisMask(iris, centroid, rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:, :, 0]), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x, y), radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0] - x) + abs(centroid[1] - y)\n if distance < flag:\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x, y), radius = cv2.minEnclosingCircle(final_cnt)\n center = int(x), int(y)\n center = centroid\n radius = rad // 2 + 2\n print(radius)\n irisMask = np.zeros_like(iris)\n 
inverseIrisMask = np.ones_like(iris) * 255\n cv2.circle(irisMask, center, radius, (255, 255, 255), -1)\n cv2.circle(inverseIrisMask, center, radius, (0, 0, 0), -1)\n return irisMask, inverseIrisMask\n\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r'))\n imCopy = imCopy.astype(float) / 255\n irisMask = irisMask.astype(float) / 255\n inverseIrisMask = inverseIrisMask.astype(float) / 255\n im = im.astype(float) / 255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert\n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\nleftEyeMask = createEyeMask(landmarks[36:42], im)\nrightEyeMask = createEyeMask(landmarks[42:48], im)\nleftIris = findIris(leftEyeMask, im, 100)\nrightIris = findIris(rightEyeMask, im, 50)\nleftIrisCentroid = findCentroid(leftIris)\nrightIrisCentroid = findCentroid(rightIris)\nrad_left = eye_aspect_ratio(landmarks[36:42])\nrad_right = eye_aspect_ratio(landmarks[42:48])\nrightIrisMask, rightInverseIrisMask = createIrisMask(rightIris,\n rightIrisCentroid, rad_right)\nleftIrisMask, leftInverseIrisMask = createIrisMask(leftIris,\n leftIrisCentroid, rad_left)\ncoloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\ncoloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask,\n leftInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\ncv2.imwrite('3.jpg', coloredEyesLady)\n",
"step-5": "import cv2\nimport dlib\nimport faceBlendCommon as face\nfrom matplotlib import pyplot as plt\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport cmapy\nimport math\n\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n rad=(A+B)/2\n return int(rad)\n\n# Load Image\nim = cv2.imread(\"imgs/2.jpg\")\n\n# Detect face landmarks\nPREDICTOR_PATH = r\"./model/shape_predictor_68_face_landmarks.dat\"\nfaceDetector = dlib.get_frontal_face_detector()\nlandmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)\nlandmarks = face.getLandmarks(faceDetector, landmarkDetector, im)\n\ndef createEyeMask(eyeLandmarks, im):\n leftEyePoints = eyeLandmarks\n eyeMask = np.zeros_like(im)\n cv2.fillConvexPoly(eyeMask, np.int32(leftEyePoints), (255, 255, 255))\n eyeMask = np.uint8(eyeMask)\n return eyeMask\n\ndef findIris(eyeMask, im, thresh):\n r = im[:,:,2]\n _, binaryIm = cv2.threshold(r, thresh, 255, cv2.THRESH_BINARY_INV)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4))\n morph = cv2.dilate(binaryIm, kernel, 1)\n morph = cv2.merge((morph, morph, morph))\n morph = morph.astype(float)/255\n eyeMask = eyeMask.astype(float)/255\n iris = cv2.multiply(eyeMask, morph)\n return iris\n\ndef findCentroid(iris):\n M = cv2.moments(iris[:,:,0])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n centroid = (cX,cY)\n return centroid\n\ndef createIrisMask(iris, centroid,rad):\n cnts, _ = cv2.findContours(np.uint8(iris[:,:,0]), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n flag = 10000\n final_cnt = None\n for cnt in cnts:\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n distance = abs(centroid[0]-x)+abs(centroid[1]-y)\n if distance < flag :\n flag = distance\n final_cnt = cnt\n else:\n continue\n (x,y),radius = cv2.minEnclosingCircle(final_cnt)\n center = (int(x),int(y))\n center = centroid\n # radius = int(radius-(radius//4))\n radius=(rad//2)+2\n 
print(radius)\n irisMask = np.zeros_like(iris)\n inverseIrisMask = np.ones_like(iris)*255\n cv2.circle(irisMask,center,radius,(255, 255, 255),-1)\n cv2.circle(inverseIrisMask,center,radius,(0, 0, 0),-1)\n # irisMask = cv2.GaussianBlur(irisMask, (5,5), cv2.BORDER_DEFAULT)\n # inverseIrisMask = cv2.GaussianBlur(inverseIrisMask, (5,5), cv2.BORDER_DEFAULT)\n return irisMask, inverseIrisMask\n\ndef changeEyeColor(im, irisMask, inverseIrisMask):\n \n imCopy = cv2.applyColorMap(im, cmapy.cmap('Blues_r')) \n imCopy = imCopy.astype(float)/255\n irisMask = irisMask.astype(float)/255\n inverseIrisMask = inverseIrisMask.astype(float)/255\n im = im.astype(float)/255\n faceWithoutEye = cv2.multiply(inverseIrisMask, im)\n newIris = cv2.multiply(irisMask, imCopy)\n result = faceWithoutEye + newIris\n return result\n\ndef float642Uint8(im):\n im2Convert = im.astype(np.float64) / np.amax(im)\n im2Convert = 255 * im2Convert \n convertedIm = im2Convert.astype(np.uint8)\n return convertedIm\n\n\n\n# Create eye mask using eye landmarks from facial landmark detection\nleftEyeMask = createEyeMask(landmarks[36:42], im)\nrightEyeMask = createEyeMask(landmarks[42:48], im)\n\n# Find the iris by thresholding the red channel of the image within the boundaries of the eye mask\nleftIris = findIris(leftEyeMask, im, 100)\nrightIris = findIris(rightEyeMask, im, 50)\n\n\n# Find the centroid of the binary image of the eye\nleftIrisCentroid = findCentroid(leftIris)\nrightIrisCentroid = findCentroid(rightIris)\n\n# Generate the iris mask and its inverse mask\nrad_left=eye_aspect_ratio(landmarks[36:42])\nrad_right=eye_aspect_ratio(landmarks[42:48])\n\n\nrightIrisMask, rightInverseIrisMask = createIrisMask(rightIris, rightIrisCentroid,rad_right)\nleftIrisMask, leftInverseIrisMask = createIrisMask(leftIris, leftIrisCentroid,rad_left)\n\n\n# Change the eye color and merge it to the original image\ncoloredEyesLady = changeEyeColor(im, rightIrisMask, rightInverseIrisMask)\ncoloredEyesLady = 
float642Uint8(coloredEyesLady)\ncoloredEyesLady = changeEyeColor(coloredEyesLady, leftIrisMask, leftInverseIrisMask)\ncoloredEyesLady = float642Uint8(coloredEyesLady)\n\n# Present results\ncv2.imwrite(\"3.jpg\", coloredEyesLady)\n",
"step-ids": [
3,
6,
8,
10,
11
]
}
|
[
3,
6,
8,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_attack(attack_config: Dict):
if attack_config['attack_model'] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError('Invalid attack model: {}'.format(
attack_config['attack_model']))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_attack(attack_config: Dict):
if attack_config['attack_model'] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError('Invalid attack model: {}'.format(
attack_config['attack_model']))
def launch_attack(attack_mode, mal_nodes):
if attack_mode == 'coordinated':
attacker = mal_nodes[0].attack_model
print("Co-ordinated '{}' attack applied to {} clients".format(
mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
attacker.attack(byz_clients=mal_nodes)
elif attack_mode == 'un_coordinated':
attacker = mal_nodes[0].attack_model
print("Un Co-ordinated '{}' attack applied to {} clients".format(
mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
for mal_client in mal_nodes:
attacker.attack(byz_clients=[mal_client])
else:
raise NotImplementedError
<|reserved_special_token_1|>
from .attack_models import DriftAttack, AdditiveGaussian, RandomGaussian, BitFlipAttack, RandomSignFlipAttack
from typing import Dict
def get_attack(attack_config: Dict):
if attack_config['attack_model'] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config['attack_model'] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config['attack_model'] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError('Invalid attack model: {}'.format(
attack_config['attack_model']))
def launch_attack(attack_mode, mal_nodes):
if attack_mode == 'coordinated':
attacker = mal_nodes[0].attack_model
print("Co-ordinated '{}' attack applied to {} clients".format(
mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
attacker.attack(byz_clients=mal_nodes)
elif attack_mode == 'un_coordinated':
attacker = mal_nodes[0].attack_model
print("Un Co-ordinated '{}' attack applied to {} clients".format(
mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
for mal_client in mal_nodes:
attacker.attack(byz_clients=[mal_client])
else:
raise NotImplementedError
<|reserved_special_token_1|>
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License
from .attack_models import (DriftAttack, AdditiveGaussian, RandomGaussian,
BitFlipAttack, RandomSignFlipAttack)
from typing import Dict
def get_attack(attack_config: Dict):
if attack_config["attack_model"] == 'drift':
return DriftAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'additive_gaussian':
return AdditiveGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_gaussian':
return RandomGaussian(attack_config=attack_config)
elif attack_config["attack_model"] == 'bit_flip':
return BitFlipAttack(attack_config=attack_config)
elif attack_config["attack_model"] == 'random_sign_flip':
return RandomSignFlipAttack(attack_config=attack_config)
else:
raise NotImplementedError("Invalid attack model: {}".format(attack_config["attack_model"]))
def launch_attack(attack_mode, mal_nodes):
if attack_mode == 'coordinated':
# Co-ordinated Attack
attacker = mal_nodes[0].attack_model
print('Co-ordinated \'{}\' attack applied to {} clients'.format(mal_nodes[0].attack_model.attack_algorithm,
len(mal_nodes)))
attacker.attack(byz_clients=mal_nodes)
elif attack_mode == 'un_coordinated':
# un_coordinated stand alone attack per client
attacker = mal_nodes[0].attack_model
print('Un Co-ordinated \'{}\' attack applied to {} clients'.
format(mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))
for mal_client in mal_nodes:
attacker.attack(byz_clients=[mal_client])
else:
raise NotImplementedError
|
flexible
|
{
"blob_id": "11320922d24b27c5cfa714f88eb0a757deef987f",
"index": 8546,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Un Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-4": "from .attack_models import DriftAttack, AdditiveGaussian, RandomGaussian, BitFlipAttack, RandomSignFlipAttack\nfrom typing import Dict\n\n\ndef get_attack(attack_config: Dict):\n if attack_config['attack_model'] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config['attack_model'] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config['attack_model'] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError('Invalid attack model: {}'.format(\n attack_config['attack_model']))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n attacker = mal_nodes[0].attack_model\n print(\"Un Co-ordinated '{}' attack applied to {} clients\".format(\n mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License\nfrom .attack_models import (DriftAttack, AdditiveGaussian, RandomGaussian,\n BitFlipAttack, RandomSignFlipAttack)\nfrom typing import Dict\n\n\ndef get_attack(attack_config: Dict):\n if attack_config[\"attack_model\"] == 'drift':\n return DriftAttack(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'additive_gaussian':\n return AdditiveGaussian(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'random_gaussian':\n return RandomGaussian(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'bit_flip':\n return BitFlipAttack(attack_config=attack_config)\n elif attack_config[\"attack_model\"] == 'random_sign_flip':\n return RandomSignFlipAttack(attack_config=attack_config)\n else:\n raise NotImplementedError(\"Invalid attack model: {}\".format(attack_config[\"attack_model\"]))\n\n\ndef launch_attack(attack_mode, mal_nodes):\n if attack_mode == 'coordinated':\n # Co-ordinated Attack\n attacker = mal_nodes[0].attack_model\n print('Co-ordinated \\'{}\\' attack applied to {} clients'.format(mal_nodes[0].attack_model.attack_algorithm,\n len(mal_nodes)))\n attacker.attack(byz_clients=mal_nodes)\n elif attack_mode == 'un_coordinated':\n # un_coordinated stand alone attack per client\n attacker = mal_nodes[0].attack_model\n print('Un Co-ordinated \\'{}\\' attack applied to {} clients'.\n format(mal_nodes[0].attack_model.attack_algorithm, len(mal_nodes)))\n for mal_client in mal_nodes:\n attacker.attack(byz_clients=[mal_client])\n else:\n raise NotImplementedError\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:
Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:
"""Which list-of-values does every element of series match first?
Warnings:
Order of LoVs is important as only the first match is considered.
Args:
series: pandas Series of data with index
patterns: list of lists-of-values
method: method to use for pattern matching
Options are:
- None: elements of series and values are checked for equality
- 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs
- custom function that accepts series, values (flat list of all values across all LoVs) and kwargs
**kwargs:
additional keyword arguments to pass to matching functions
Returns:
Numeric numpy array
- 0 means no match found in any of the LoV
- 1 means some value of LoV #0 matched
- 2 means some value of LoV #1 matched
- etc.
"""
elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]
if not elov:
return np.zeros(series.size, int)
num, value = zip(*elov)
lov_idx_plus = np.concatenate(([0], num))
if method is None:
mm = series.to_numpy() == np.array(value)[:, np.newaxis]
elif not callable(method):
ptns = pd.Series(value)
kwargs['na'] = False
do_match = getattr(series.str, method)
mm = ptns.apply(do_match, **kwargs).values
else:
mm = method(series, value, **kwargs)
return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:
Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:
"""Which list-of-values does every element of series match first?
Warnings:
Order of LoVs is important as only the first match is considered.
Args:
series: pandas Series of data with index
patterns: list of lists-of-values
method: method to use for pattern matching
Options are:
- None: elements of series and values are checked for equality
- 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs
- custom function that accepts series, values (flat list of all values across all LoVs) and kwargs
**kwargs:
additional keyword arguments to pass to matching functions
Returns:
Numeric numpy array
- 0 means no match found in any of the LoV
- 1 means some value of LoV #0 matched
- 2 means some value of LoV #1 matched
- etc.
"""
elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]
if not elov:
return np.zeros(series.size, int)
num, value = zip(*elov)
lov_idx_plus = np.concatenate(([0], num))
if method is None:
mm = series.to_numpy() == np.array(value)[:, np.newaxis]
elif not callable(method):
ptns = pd.Series(value)
kwargs['na'] = False
do_match = getattr(series.str, method)
mm = ptns.apply(do_match, **kwargs).values
else:
mm = method(series, value, **kwargs)
return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
def which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor:
pd.Series=None, method: Optional[Union[Callable, str]]=None, **kwargs):
"""Returns tag of the first matched List-of-Values.
For each element in ``series`` returned is the tag of the list-of-values
in the dictionary of LoVs ``taglov`` which first matches the element with
one of its values *OR* value from donor with the same index *OR* ``na``.
For matching methods see :any:`which_lov`.
Args:
series: pandas Series of data
taglov: tagged list of values: TagLov object or anything that can
properly initialise it, including None
na: value to use if element is not matched, last resort
donor: pandas Series of data to pick in case element is not matched
method: name of Series.str.<...> method or None for equality check or
custom function
**kwargs: arguments to the method
Returns:
Series
"""
if series.empty:
return series
if not isinstance(taglov, TagLoV):
taglov = TagLoV(taglov)
lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)
tags_plus = np.array((na, *taglov.tags))
result = pd.Series(tags_plus[lov_idx_plus], index=series.index)
if isinstance(donor, pd.Series):
unmatched_idx = series.index[~lov_idx_plus.astype(bool)]
if not unmatched_idx.empty:
take_idx = unmatched_idx.intersection(donor.index)
if not take_idx.empty:
result[take_idx] = donor[take_idx]
return result
<|reserved_special_token_1|>
from typing import Any, Sequence, Callable, Union, Optional
import pandas as pd
import numpy as np
from .taglov import TagLoV
def which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:
Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:
"""Which list-of-values does every element of series match first?
Warnings:
Order of LoVs is important as only the first match is considered.
Args:
series: pandas Series of data with index
patterns: list of lists-of-values
method: method to use for pattern matching
Options are:
- None: elements of series and values are checked for equality
- 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs
- custom function that accepts series, values (flat list of all values across all LoVs) and kwargs
**kwargs:
additional keyword arguments to pass to matching functions
Returns:
Numeric numpy array
- 0 means no match found in any of the LoV
- 1 means some value of LoV #0 matched
- 2 means some value of LoV #1 matched
- etc.
"""
elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]
if not elov:
return np.zeros(series.size, int)
num, value = zip(*elov)
lov_idx_plus = np.concatenate(([0], num))
if method is None:
mm = series.to_numpy() == np.array(value)[:, np.newaxis]
elif not callable(method):
ptns = pd.Series(value)
kwargs['na'] = False
do_match = getattr(series.str, method)
mm = ptns.apply(do_match, **kwargs).values
else:
mm = method(series, value, **kwargs)
return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
def which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor:
pd.Series=None, method: Optional[Union[Callable, str]]=None, **kwargs):
"""Returns tag of the first matched List-of-Values.
For each element in ``series`` returned is the tag of the list-of-values
in the dictionary of LoVs ``taglov`` which first matches the element with
one of its values *OR* value from donor with the same index *OR* ``na``.
For matching methods see :any:`which_lov`.
Args:
series: pandas Series of data
taglov: tagged list of values: TagLov object or anything that can
properly initialise it, including None
na: value to use if element is not matched, last resort
donor: pandas Series of data to pick in case element is not matched
method: name of Series.str.<...> method or None for equality check or
custom function
**kwargs: arguments to the method
Returns:
Series
"""
if series.empty:
return series
if not isinstance(taglov, TagLoV):
taglov = TagLoV(taglov)
lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)
tags_plus = np.array((na, *taglov.tags))
result = pd.Series(tags_plus[lov_idx_plus], index=series.index)
if isinstance(donor, pd.Series):
unmatched_idx = series.index[~lov_idx_plus.astype(bool)]
if not unmatched_idx.empty:
take_idx = unmatched_idx.intersection(donor.index)
if not take_idx.empty:
result[take_idx] = donor[take_idx]
return result
<|reserved_special_token_1|>
from typing import Any, Sequence, Callable, Union, Optional
import pandas as pd
import numpy as np
from .taglov import TagLoV
def which_lov(series: pd.Series,
patterns: Sequence[Sequence[Any]],
method: Optional[Union[Callable, str]] = None,
**kwargs) -> np.ndarray:
"""Which list-of-values does every element of series match first?
Warnings:
Order of LoVs is important as only the first match is considered.
Args:
series: pandas Series of data with index
patterns: list of lists-of-values
method: method to use for pattern matching
Options are:
- None: elements of series and values are checked for equality
- 'match', 'contains', 'startswith', 'endswith': pandas'\
Series.str.<...> methods used, with arguments passed as kwargs
- custom function that accepts series, values (flat list of all\
values across all LoVs) and kwargs
**kwargs:
additional keyword arguments to pass to matching functions
Returns:
Numeric numpy array
- 0 means no match found in any of the LoV
- 1 means some value of LoV #0 matched
- 2 means some value of LoV #1 matched
- etc.
"""
elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]
if not elov:
return np.zeros(series.size, int)
num, value = zip(*elov)
lov_idx_plus = np.concatenate(([0], num))
if method is None:
mm = series.to_numpy() == np.array(value)[:, np.newaxis]
elif not callable(method): # assume name of pd.Series.str method
ptns = pd.Series(value)
kwargs['na'] = False
do_match = getattr(series.str, method)
mm = ptns.apply(do_match, **kwargs).values
else:
mm = method(series, value, **kwargs)
return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]
def which_tag(series: pd.Series,
taglov: Union[TagLoV, Any],
na: Any,
donor: pd.Series = None,
method: Optional[Union[Callable, str]] = None,
**kwargs):
"""Returns tag of the first matched List-of-Values.
For each element in ``series`` returned is the tag of the list-of-values
in the dictionary of LoVs ``taglov`` which first matches the element with
one of its values *OR* value from donor with the same index *OR* ``na``.
For matching methods see :any:`which_lov`.
Args:
series: pandas Series of data
taglov: tagged list of values: TagLov object or anything that can
properly initialise it, including None
na: value to use if element is not matched, last resort
donor: pandas Series of data to pick in case element is not matched
method: name of Series.str.<...> method or None for equality check or
custom function
**kwargs: arguments to the method
Returns:
Series
"""
if series.empty:
return series
if not isinstance(taglov, TagLoV):
taglov = TagLoV(taglov)
lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)
tags_plus = np.array((na, *taglov.tags))
result = pd.Series(tags_plus[lov_idx_plus], index=series.index)
if isinstance(donor, pd.Series): # take unmatched values from donor
unmatched_idx = series.index[~lov_idx_plus.astype(bool)]
if not unmatched_idx.empty:
take_idx = unmatched_idx.intersection(donor.index)
if not take_idx.empty:
result[take_idx] = donor[take_idx]
return result
|
flexible
|
{
"blob_id": "7b9bf791d52fdc801e24d0c8541d77d91a488e12",
"index": 3361,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:\n Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:\n \"\"\"Which list-of-values does every element of series match first?\n\n Warnings:\n Order of LoVs is important as only the first match is considered.\n\n Args:\n series: pandas Series of data with index\n patterns: list of lists-of-values\n method: method to use for pattern matching\n\n Options are:\n - None: elements of series and values are checked for equality\n - 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs\n - custom function that accepts series, values (flat list of all values across all LoVs) and kwargs\n **kwargs:\n additional keyword arguments to pass to matching functions\n\n Returns:\n Numeric numpy array\n - 0 means no match found in any of the LoV\n - 1 means some value of LoV #0 matched\n - 2 means some value of LoV #1 matched\n - etc.\n\n \"\"\"\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method):\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:\n Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:\n \"\"\"Which list-of-values does every element of series match first?\n\n Warnings:\n Order of LoVs is important as only the first match is considered.\n\n Args:\n series: pandas Series of data with index\n patterns: list of lists-of-values\n method: method to use for pattern matching\n\n Options are:\n - None: elements of series and values are checked for equality\n - 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs\n - custom function that accepts series, values (flat list of all values across all LoVs) and kwargs\n **kwargs:\n additional keyword arguments to pass to matching functions\n\n Returns:\n Numeric numpy array\n - 0 means no match found in any of the LoV\n - 1 means some value of LoV #0 matched\n - 2 means some value of LoV #1 matched\n - etc.\n\n \"\"\"\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method):\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]\n\n\ndef which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor:\n pd.Series=None, method: Optional[Union[Callable, str]]=None, **kwargs):\n \"\"\"Returns tag of the first matched List-of-Values.\n\n For each element in ``series`` returned is the tag of the list-of-values\n in the dictionary of LoVs ``taglov`` which first matches the element with\n one of its values *OR* value from donor with the same index *OR* ``na``.\n 
For matching methods see :any:`which_lov`.\n\n Args:\n series: pandas Series of data\n taglov: tagged list of values: TagLov object or anything that can\n properly initialise it, including None\n na: value to use if element is not matched, last resort\n donor: pandas Series of data to pick in case element is not matched\n method: name of Series.str.<...> method or None for equality check or\n custom function\n **kwargs: arguments to the method\n\n Returns:\n Series\n\n \"\"\"\n if series.empty:\n return series\n if not isinstance(taglov, TagLoV):\n taglov = TagLoV(taglov)\n lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)\n tags_plus = np.array((na, *taglov.tags))\n result = pd.Series(tags_plus[lov_idx_plus], index=series.index)\n if isinstance(donor, pd.Series):\n unmatched_idx = series.index[~lov_idx_plus.astype(bool)]\n if not unmatched_idx.empty:\n take_idx = unmatched_idx.intersection(donor.index)\n if not take_idx.empty:\n result[take_idx] = donor[take_idx]\n return result\n",
"step-4": "from typing import Any, Sequence, Callable, Union, Optional\nimport pandas as pd\nimport numpy as np\nfrom .taglov import TagLoV\n\n\ndef which_lov(series: pd.Series, patterns: Sequence[Sequence[Any]], method:\n Optional[Union[Callable, str]]=None, **kwargs) ->np.ndarray:\n \"\"\"Which list-of-values does every element of series match first?\n\n Warnings:\n Order of LoVs is important as only the first match is considered.\n\n Args:\n series: pandas Series of data with index\n patterns: list of lists-of-values\n method: method to use for pattern matching\n\n Options are:\n - None: elements of series and values are checked for equality\n - 'match', 'contains', 'startswith', 'endswith': pandas' Series.str.<...> methods used, with arguments passed as kwargs\n - custom function that accepts series, values (flat list of all values across all LoVs) and kwargs\n **kwargs:\n additional keyword arguments to pass to matching functions\n\n Returns:\n Numeric numpy array\n - 0 means no match found in any of the LoV\n - 1 means some value of LoV #0 matched\n - 2 means some value of LoV #1 matched\n - etc.\n\n \"\"\"\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method):\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]\n\n\ndef which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor:\n pd.Series=None, method: Optional[Union[Callable, str]]=None, **kwargs):\n \"\"\"Returns tag of the first matched List-of-Values.\n\n For each element in ``series`` returned is the tag of the list-of-values\n in the dictionary of LoVs ``taglov`` 
which first matches the element with\n one of its values *OR* value from donor with the same index *OR* ``na``.\n For matching methods see :any:`which_lov`.\n\n Args:\n series: pandas Series of data\n taglov: tagged list of values: TagLov object or anything that can\n properly initialise it, including None\n na: value to use if element is not matched, last resort\n donor: pandas Series of data to pick in case element is not matched\n method: name of Series.str.<...> method or None for equality check or\n custom function\n **kwargs: arguments to the method\n\n Returns:\n Series\n\n \"\"\"\n if series.empty:\n return series\n if not isinstance(taglov, TagLoV):\n taglov = TagLoV(taglov)\n lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)\n tags_plus = np.array((na, *taglov.tags))\n result = pd.Series(tags_plus[lov_idx_plus], index=series.index)\n if isinstance(donor, pd.Series):\n unmatched_idx = series.index[~lov_idx_plus.astype(bool)]\n if not unmatched_idx.empty:\n take_idx = unmatched_idx.intersection(donor.index)\n if not take_idx.empty:\n result[take_idx] = donor[take_idx]\n return result\n",
"step-5": "from typing import Any, Sequence, Callable, Union, Optional\nimport pandas as pd\nimport numpy as np\nfrom .taglov import TagLoV\n\n\ndef which_lov(series: pd.Series,\n patterns: Sequence[Sequence[Any]],\n method: Optional[Union[Callable, str]] = None,\n **kwargs) -> np.ndarray:\n \"\"\"Which list-of-values does every element of series match first?\n\n Warnings:\n Order of LoVs is important as only the first match is considered.\n\n Args:\n series: pandas Series of data with index\n patterns: list of lists-of-values\n method: method to use for pattern matching\n\n Options are:\n - None: elements of series and values are checked for equality\n - 'match', 'contains', 'startswith', 'endswith': pandas'\\\n Series.str.<...> methods used, with arguments passed as kwargs\n - custom function that accepts series, values (flat list of all\\\n values across all LoVs) and kwargs\n **kwargs:\n additional keyword arguments to pass to matching functions\n\n Returns:\n Numeric numpy array\n - 0 means no match found in any of the LoV\n - 1 means some value of LoV #0 matched\n - 2 means some value of LoV #1 matched\n - etc.\n\n \"\"\"\n elov = [(i + 1, v) for i, lov in enumerate(patterns) for v in lov]\n if not elov:\n return np.zeros(series.size, int)\n num, value = zip(*elov)\n lov_idx_plus = np.concatenate(([0], num))\n if method is None:\n mm = series.to_numpy() == np.array(value)[:, np.newaxis]\n elif not callable(method): # assume name of pd.Series.str method\n ptns = pd.Series(value)\n kwargs['na'] = False\n do_match = getattr(series.str, method)\n mm = ptns.apply(do_match, **kwargs).values\n else:\n mm = method(series, value, **kwargs)\n return lov_idx_plus[mm.any(axis=0) + mm.argmax(axis=0)]\n\n\ndef which_tag(series: pd.Series,\n taglov: Union[TagLoV, Any],\n na: Any,\n donor: pd.Series = None,\n method: Optional[Union[Callable, str]] = None,\n **kwargs):\n \"\"\"Returns tag of the first matched List-of-Values.\n\n For each element in ``series`` returned is the 
tag of the list-of-values\n in the dictionary of LoVs ``taglov`` which first matches the element with\n one of its values *OR* value from donor with the same index *OR* ``na``.\n For matching methods see :any:`which_lov`.\n\n Args:\n series: pandas Series of data\n taglov: tagged list of values: TagLov object or anything that can\n properly initialise it, including None\n na: value to use if element is not matched, last resort\n donor: pandas Series of data to pick in case element is not matched\n method: name of Series.str.<...> method or None for equality check or\n custom function\n **kwargs: arguments to the method\n\n Returns:\n Series\n\n \"\"\"\n if series.empty:\n return series\n if not isinstance(taglov, TagLoV):\n taglov = TagLoV(taglov)\n lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs)\n tags_plus = np.array((na, *taglov.tags))\n result = pd.Series(tags_plus[lov_idx_plus], index=series.index)\n if isinstance(donor, pd.Series): # take unmatched values from donor\n unmatched_idx = series.index[~lov_idx_plus.astype(bool)]\n if not unmatched_idx.empty:\n take_idx = unmatched_idx.intersection(donor.index)\n if not take_idx.empty:\n result[take_idx] = donor[take_idx]\n return result\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_dev_key():
ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')
)
if os.path.exists(ads_dev_key_filename):
with open(ads_dev_key_filename, 'r') as fp:
dev_key = fp.readline().rstrip()
return dev_key
dev_key = os.environ.get('ADS_DEV_KEY', None)
if dev_key is None:
raise IOError('no ADS API key found in ~/.ads/dev_key')
return dev_key
def get_author_locations(author, return_json=False):
name = sorted([a.strip() for a in author.split(',')], reverse=True, key
=lambda n: len(n))[0].lower()
params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),
'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}
response = requests.post(ads_api_url, params=params)
if response.status_code != requests.codes.ok:
response.raise_for_status()
codes = response.json().get('results', {}).get('docs', None)
if codes is None:
return []
results = [(el.get('bibcode'), el.get('year')) for el in codes]
affils = []
for code, year in results:
if year is None:
continue
text = rdb.get('career:{0}'.format(code))
if text is None:
url = ads_html_url + code
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
text = r.text
rdb.set('career:{0}'.format(code), text)
tree = lxml.html.fromstring(text)
for author in tree.find_class('author'):
if name in author.find_class('authorName')[0].text.lower():
a = author.find_class('authorAffiliation')
if len(a):
affils.append((int(year), a[0].text.strip('()').strip()))
break
return affils
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_dev_key():
ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')
)
if os.path.exists(ads_dev_key_filename):
with open(ads_dev_key_filename, 'r') as fp:
dev_key = fp.readline().rstrip()
return dev_key
dev_key = os.environ.get('ADS_DEV_KEY', None)
if dev_key is None:
raise IOError('no ADS API key found in ~/.ads/dev_key')
return dev_key
def get_author_locations(author, return_json=False):
name = sorted([a.strip() for a in author.split(',')], reverse=True, key
=lambda n: len(n))[0].lower()
params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),
'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}
response = requests.post(ads_api_url, params=params)
if response.status_code != requests.codes.ok:
response.raise_for_status()
codes = response.json().get('results', {}).get('docs', None)
if codes is None:
return []
results = [(el.get('bibcode'), el.get('year')) for el in codes]
affils = []
for code, year in results:
if year is None:
continue
text = rdb.get('career:{0}'.format(code))
if text is None:
url = ads_html_url + code
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
text = r.text
rdb.set('career:{0}'.format(code), text)
tree = lxml.html.fromstring(text)
for author in tree.find_class('author'):
if name in author.find_class('authorName')[0].text.lower():
a = author.find_class('authorAffiliation')
if len(a):
affils.append((int(year), a[0].text.strip('()').strip()))
break
return affils
if __name__ == '__main__':
print(get_author_locations('foreman-mackey'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ads_api_url = 'http://adslabs.org/adsabs/api/search/'
ads_html_url = 'http://labs.adsabs.harvard.edu/adsabs/abs/'
rdb = redis.Redis()
def get_dev_key():
ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')
)
if os.path.exists(ads_dev_key_filename):
with open(ads_dev_key_filename, 'r') as fp:
dev_key = fp.readline().rstrip()
return dev_key
dev_key = os.environ.get('ADS_DEV_KEY', None)
if dev_key is None:
raise IOError('no ADS API key found in ~/.ads/dev_key')
return dev_key
def get_author_locations(author, return_json=False):
name = sorted([a.strip() for a in author.split(',')], reverse=True, key
=lambda n: len(n))[0].lower()
params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),
'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}
response = requests.post(ads_api_url, params=params)
if response.status_code != requests.codes.ok:
response.raise_for_status()
codes = response.json().get('results', {}).get('docs', None)
if codes is None:
return []
results = [(el.get('bibcode'), el.get('year')) for el in codes]
affils = []
for code, year in results:
if year is None:
continue
text = rdb.get('career:{0}'.format(code))
if text is None:
url = ads_html_url + code
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
text = r.text
rdb.set('career:{0}'.format(code), text)
tree = lxml.html.fromstring(text)
for author in tree.find_class('author'):
if name in author.find_class('authorName')[0].text.lower():
a = author.find_class('authorAffiliation')
if len(a):
affils.append((int(year), a[0].text.strip('()').strip()))
break
return affils
if __name__ == '__main__':
print(get_author_locations('foreman-mackey'))
<|reserved_special_token_1|>
import os
import redis
import requests
import lxml.html
ads_api_url = 'http://adslabs.org/adsabs/api/search/'
ads_html_url = 'http://labs.adsabs.harvard.edu/adsabs/abs/'
rdb = redis.Redis()
def get_dev_key():
ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')
)
if os.path.exists(ads_dev_key_filename):
with open(ads_dev_key_filename, 'r') as fp:
dev_key = fp.readline().rstrip()
return dev_key
dev_key = os.environ.get('ADS_DEV_KEY', None)
if dev_key is None:
raise IOError('no ADS API key found in ~/.ads/dev_key')
return dev_key
def get_author_locations(author, return_json=False):
name = sorted([a.strip() for a in author.split(',')], reverse=True, key
=lambda n: len(n))[0].lower()
params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),
'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}
response = requests.post(ads_api_url, params=params)
if response.status_code != requests.codes.ok:
response.raise_for_status()
codes = response.json().get('results', {}).get('docs', None)
if codes is None:
return []
results = [(el.get('bibcode'), el.get('year')) for el in codes]
affils = []
for code, year in results:
if year is None:
continue
text = rdb.get('career:{0}'.format(code))
if text is None:
url = ads_html_url + code
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
text = r.text
rdb.set('career:{0}'.format(code), text)
tree = lxml.html.fromstring(text)
for author in tree.find_class('author'):
if name in author.find_class('authorName')[0].text.lower():
a = author.find_class('authorAffiliation')
if len(a):
affils.append((int(year), a[0].text.strip('()').strip()))
break
return affils
if __name__ == '__main__':
print(get_author_locations('foreman-mackey'))
<|reserved_special_token_1|>
import os
import redis
import requests
import lxml.html
ads_api_url = "http://adslabs.org/adsabs/api/search/"
ads_html_url = "http://labs.adsabs.harvard.edu/adsabs/abs/"
rdb = redis.Redis()
def get_dev_key():
# Credit: Andy Casey
ads_dev_key_filename = os.path.abspath(
os.path.expanduser("~/.ads/dev_key"))
if os.path.exists(ads_dev_key_filename):
with open(ads_dev_key_filename, "r") as fp:
dev_key = fp.readline().rstrip()
return dev_key
dev_key = os.environ.get("ADS_DEV_KEY", None)
if dev_key is None:
raise IOError("no ADS API key found in ~/.ads/dev_key")
return dev_key
def get_author_locations(author, return_json=False):
name = sorted([a.strip() for a in author.split(",")], reverse=True,
key=lambda n: len(n))[0].lower()
params = {
"q": "author:{0}".format(author),
"dev_key": get_dev_key(),
"rows": 200,
"filter": "database:astronomy",
"fl": "bibcode,year",
}
response = requests.post(ads_api_url, params=params)
if response.status_code != requests.codes.ok:
response.raise_for_status()
codes = response.json().get("results", {}).get("docs", None)
if codes is None:
return []
results = [(el.get("bibcode"), el.get("year")) for el in codes]
affils = []
for code, year in results:
if year is None:
continue
text = rdb.get("career:{0}".format(code))
if text is None:
url = ads_html_url + code
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
text = r.text
rdb.set("career:{0}".format(code), text)
tree = lxml.html.fromstring(text)
for author in tree.find_class("author"):
if name in author.find_class("authorName")[0].text.lower():
a = author.find_class("authorAffiliation")
if len(a):
affils.append((int(year), a[0].text.strip("()").strip()))
break
return affils
if __name__ == "__main__":
print(get_author_locations("foreman-mackey"))
|
flexible
|
{
"blob_id": "9e314cdf4ef09ecf4a4b43358ae32f76c40aaea8",
"index": 8037,
"step-1": "<mask token>\n\n\ndef get_dev_key():\n ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')\n )\n if os.path.exists(ads_dev_key_filename):\n with open(ads_dev_key_filename, 'r') as fp:\n dev_key = fp.readline().rstrip()\n return dev_key\n dev_key = os.environ.get('ADS_DEV_KEY', None)\n if dev_key is None:\n raise IOError('no ADS API key found in ~/.ads/dev_key')\n return dev_key\n\n\ndef get_author_locations(author, return_json=False):\n name = sorted([a.strip() for a in author.split(',')], reverse=True, key\n =lambda n: len(n))[0].lower()\n params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),\n 'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}\n response = requests.post(ads_api_url, params=params)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n codes = response.json().get('results', {}).get('docs', None)\n if codes is None:\n return []\n results = [(el.get('bibcode'), el.get('year')) for el in codes]\n affils = []\n for code, year in results:\n if year is None:\n continue\n text = rdb.get('career:{0}'.format(code))\n if text is None:\n url = ads_html_url + code\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n text = r.text\n rdb.set('career:{0}'.format(code), text)\n tree = lxml.html.fromstring(text)\n for author in tree.find_class('author'):\n if name in author.find_class('authorName')[0].text.lower():\n a = author.find_class('authorAffiliation')\n if len(a):\n affils.append((int(year), a[0].text.strip('()').strip()))\n break\n return affils\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_dev_key():\n ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')\n )\n if os.path.exists(ads_dev_key_filename):\n with open(ads_dev_key_filename, 'r') as fp:\n dev_key = fp.readline().rstrip()\n return dev_key\n dev_key = os.environ.get('ADS_DEV_KEY', None)\n if dev_key is None:\n raise IOError('no ADS API key found in ~/.ads/dev_key')\n return dev_key\n\n\ndef get_author_locations(author, return_json=False):\n name = sorted([a.strip() for a in author.split(',')], reverse=True, key\n =lambda n: len(n))[0].lower()\n params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),\n 'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}\n response = requests.post(ads_api_url, params=params)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n codes = response.json().get('results', {}).get('docs', None)\n if codes is None:\n return []\n results = [(el.get('bibcode'), el.get('year')) for el in codes]\n affils = []\n for code, year in results:\n if year is None:\n continue\n text = rdb.get('career:{0}'.format(code))\n if text is None:\n url = ads_html_url + code\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n text = r.text\n rdb.set('career:{0}'.format(code), text)\n tree = lxml.html.fromstring(text)\n for author in tree.find_class('author'):\n if name in author.find_class('authorName')[0].text.lower():\n a = author.find_class('authorAffiliation')\n if len(a):\n affils.append((int(year), a[0].text.strip('()').strip()))\n break\n return affils\n\n\nif __name__ == '__main__':\n print(get_author_locations('foreman-mackey'))\n",
"step-3": "<mask token>\nads_api_url = 'http://adslabs.org/adsabs/api/search/'\nads_html_url = 'http://labs.adsabs.harvard.edu/adsabs/abs/'\nrdb = redis.Redis()\n\n\ndef get_dev_key():\n ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')\n )\n if os.path.exists(ads_dev_key_filename):\n with open(ads_dev_key_filename, 'r') as fp:\n dev_key = fp.readline().rstrip()\n return dev_key\n dev_key = os.environ.get('ADS_DEV_KEY', None)\n if dev_key is None:\n raise IOError('no ADS API key found in ~/.ads/dev_key')\n return dev_key\n\n\ndef get_author_locations(author, return_json=False):\n name = sorted([a.strip() for a in author.split(',')], reverse=True, key\n =lambda n: len(n))[0].lower()\n params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),\n 'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}\n response = requests.post(ads_api_url, params=params)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n codes = response.json().get('results', {}).get('docs', None)\n if codes is None:\n return []\n results = [(el.get('bibcode'), el.get('year')) for el in codes]\n affils = []\n for code, year in results:\n if year is None:\n continue\n text = rdb.get('career:{0}'.format(code))\n if text is None:\n url = ads_html_url + code\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n text = r.text\n rdb.set('career:{0}'.format(code), text)\n tree = lxml.html.fromstring(text)\n for author in tree.find_class('author'):\n if name in author.find_class('authorName')[0].text.lower():\n a = author.find_class('authorAffiliation')\n if len(a):\n affils.append((int(year), a[0].text.strip('()').strip()))\n break\n return affils\n\n\nif __name__ == '__main__':\n print(get_author_locations('foreman-mackey'))\n",
"step-4": "import os\nimport redis\nimport requests\nimport lxml.html\nads_api_url = 'http://adslabs.org/adsabs/api/search/'\nads_html_url = 'http://labs.adsabs.harvard.edu/adsabs/abs/'\nrdb = redis.Redis()\n\n\ndef get_dev_key():\n ads_dev_key_filename = os.path.abspath(os.path.expanduser('~/.ads/dev_key')\n )\n if os.path.exists(ads_dev_key_filename):\n with open(ads_dev_key_filename, 'r') as fp:\n dev_key = fp.readline().rstrip()\n return dev_key\n dev_key = os.environ.get('ADS_DEV_KEY', None)\n if dev_key is None:\n raise IOError('no ADS API key found in ~/.ads/dev_key')\n return dev_key\n\n\ndef get_author_locations(author, return_json=False):\n name = sorted([a.strip() for a in author.split(',')], reverse=True, key\n =lambda n: len(n))[0].lower()\n params = {'q': 'author:{0}'.format(author), 'dev_key': get_dev_key(),\n 'rows': 200, 'filter': 'database:astronomy', 'fl': 'bibcode,year'}\n response = requests.post(ads_api_url, params=params)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n codes = response.json().get('results', {}).get('docs', None)\n if codes is None:\n return []\n results = [(el.get('bibcode'), el.get('year')) for el in codes]\n affils = []\n for code, year in results:\n if year is None:\n continue\n text = rdb.get('career:{0}'.format(code))\n if text is None:\n url = ads_html_url + code\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n text = r.text\n rdb.set('career:{0}'.format(code), text)\n tree = lxml.html.fromstring(text)\n for author in tree.find_class('author'):\n if name in author.find_class('authorName')[0].text.lower():\n a = author.find_class('authorAffiliation')\n if len(a):\n affils.append((int(year), a[0].text.strip('()').strip()))\n break\n return affils\n\n\nif __name__ == '__main__':\n print(get_author_locations('foreman-mackey'))\n",
"step-5": "import os\nimport redis\nimport requests\nimport lxml.html\n\nads_api_url = \"http://adslabs.org/adsabs/api/search/\"\nads_html_url = \"http://labs.adsabs.harvard.edu/adsabs/abs/\"\n\nrdb = redis.Redis()\n\n\ndef get_dev_key():\n # Credit: Andy Casey\n ads_dev_key_filename = os.path.abspath(\n os.path.expanduser(\"~/.ads/dev_key\"))\n\n if os.path.exists(ads_dev_key_filename):\n with open(ads_dev_key_filename, \"r\") as fp:\n dev_key = fp.readline().rstrip()\n\n return dev_key\n\n dev_key = os.environ.get(\"ADS_DEV_KEY\", None)\n if dev_key is None:\n raise IOError(\"no ADS API key found in ~/.ads/dev_key\")\n return dev_key\n\n\ndef get_author_locations(author, return_json=False):\n name = sorted([a.strip() for a in author.split(\",\")], reverse=True,\n key=lambda n: len(n))[0].lower()\n\n params = {\n \"q\": \"author:{0}\".format(author),\n \"dev_key\": get_dev_key(),\n \"rows\": 200,\n \"filter\": \"database:astronomy\",\n \"fl\": \"bibcode,year\",\n }\n response = requests.post(ads_api_url, params=params)\n if response.status_code != requests.codes.ok:\n response.raise_for_status()\n\n codes = response.json().get(\"results\", {}).get(\"docs\", None)\n if codes is None:\n return []\n\n results = [(el.get(\"bibcode\"), el.get(\"year\")) for el in codes]\n\n affils = []\n for code, year in results:\n if year is None:\n continue\n\n text = rdb.get(\"career:{0}\".format(code))\n if text is None:\n url = ads_html_url + code\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n r.raise_for_status()\n text = r.text\n rdb.set(\"career:{0}\".format(code), text)\n\n tree = lxml.html.fromstring(text)\n for author in tree.find_class(\"author\"):\n if name in author.find_class(\"authorName\")[0].text.lower():\n a = author.find_class(\"authorAffiliation\")\n if len(a):\n affils.append((int(year), a[0].text.strip(\"()\").strip()))\n break\n\n return affils\n\n\nif __name__ == \"__main__\":\n print(get_author_locations(\"foreman-mackey\"))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ProcessDisplay(commands.Cog):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@commands.Cog.listener()
async def on_ready(self):
"""
Ran when bot is starting up and ready
Deletes messages from the bot in the TEXTCHANNEL
starts up find_processes method
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
started = True
print('ProcessDisplay Running')
@commands.command()
@commands.has_permissions(administrator=True)
async def toggle_inline(self, ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
need to make
:param direction:
:param process_name:
:return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == 'up':
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f'The process {process} is already being displayed')
elif name in PROCESSES.values():
await ctx.send(
f'The process name {name} is already being displayed')
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f'The process {name} has been added')
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(' '.join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f'The process {name} has been removed')
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(
) in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process), value=
'Online <:GreenTick:592083498534174721>', inline=
self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process), value=
'Offline <:RedCross:592082557961633877>', inline=
self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process), value=
'Admin Required <:OrangeUnknown:592082676891123722>',
inline=self.inline)
await msg.edit(content='', embed=new_embed)
def is_me(self, m):
"""
Checks if a messages author is the bot
:param m: tbh idk, maybe message?
:return:
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
:return: the message that says how many messages were deleted
"""
await channel.purge(limit=100, check=self.is_me)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProcessDisplay(commands.Cog):
<|reserved_special_token_0|>
def __init__(self, client):
"""
:param client: the bot client parsed in from the main program
"""
self.started = False
self.client = client
self.inline = False
@commands.Cog.listener()
async def on_ready(self):
"""
Ran when bot is starting up and ready
Deletes messages from the bot in the TEXTCHANNEL
starts up find_processes method
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
started = True
print('ProcessDisplay Running')
@commands.command()
@commands.has_permissions(administrator=True)
async def toggle_inline(self, ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
need to make
:param direction:
:param process_name:
:return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == 'up':
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f'The process {process} is already being displayed')
elif name in PROCESSES.values():
await ctx.send(
f'The process name {name} is already being displayed')
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f'The process {name} has been added')
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(' '.join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f'The process {name} has been removed')
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(
) in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process), value=
'Online <:GreenTick:592083498534174721>', inline=
self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process), value=
'Offline <:RedCross:592082557961633877>', inline=
self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process), value=
'Admin Required <:OrangeUnknown:592082676891123722>',
inline=self.inline)
await msg.edit(content='', embed=new_embed)
def is_me(self, m):
"""
Checks if a messages author is the bot
:param m: tbh idk, maybe message?
:return:
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
:return: the message that says how many messages were deleted
"""
await channel.purge(limit=100, check=self.is_me)
@staticmethod
def update_processes_config():
"""
Updates the processes line in the config with the current PROCESSES
:return:
"""
config.set('ProcessDisplay', 'processes', str(PROCESSES))
with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:
config.write(configfile)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProcessDisplay(commands.Cog):
"""
The Cog for Process Display
"""
def __init__(self, client):
"""
:param client: the bot client parsed in from the main program
"""
self.started = False
self.client = client
self.inline = False
@commands.Cog.listener()
async def on_ready(self):
"""
Ran when bot is starting up and ready
Deletes messages from the bot in the TEXTCHANNEL
starts up find_processes method
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
started = True
print('ProcessDisplay Running')
@commands.command()
@commands.has_permissions(administrator=True)
async def toggle_inline(self, ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
need to make
:param direction:
:param process_name:
:return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == 'up':
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f'The process {process} is already being displayed')
elif name in PROCESSES.values():
await ctx.send(
f'The process name {name} is already being displayed')
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f'The process {name} has been added')
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(' '.join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f'The process {name} has been removed')
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(
) in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process), value=
'Online <:GreenTick:592083498534174721>', inline=
self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process), value=
'Offline <:RedCross:592082557961633877>', inline=
self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process), value=
'Admin Required <:OrangeUnknown:592082676891123722>',
inline=self.inline)
await msg.edit(content='', embed=new_embed)
def is_me(self, m):
"""
Checks if a messages author is the bot
:param m: tbh idk, maybe message?
:return:
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
:return: the message that says how many messages were deleted
"""
await channel.purge(limit=100, check=self.is_me)
@staticmethod
def update_processes_config():
"""
Updates the processes line in the config with the current PROCESSES
:return:
"""
config.set('ProcessDisplay', 'processes', str(PROCESSES))
with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:
config.write(configfile)
@staticmethod
def fix_emoji_escapes(text):
"""
Fixes emoji escapes to add the < back on
:param text: The text that needs to be checked for an escape
:return: the fixed text
"""
new_text = text.split(':')
for i in range(2, len(new_text)):
if '>' in new_text[i]:
new_text[i - 2] += '<'
return ':'.join(new_text)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Jack Draper'
__copyright__ = 'Unofficial Copyright 2019, CyclopsBot'
__credits__ = ['Jack Draper']
__license__ = 'Developer'
__version__ = '0.0.4'
__maintainer__ = 'Jack Draper'
__email__ = 'thejaydwee@gmail.com'
__status__ = 'Development'
CONFIG_PATH = './configs/config.ini'
DEFAULT_EMBED = discord.Embed(title=':desktop: Program Status', colour=
discord.Colour.blue())
if not os.path.exists('./configs/config.ini'):
print('No config file can be found in ./configs/.')
sys.exit('No config found.')
config = configparser.ConfigParser()
try:
config.read_file(codecs.open(CONFIG_PATH, 'r', 'utf-8-sig'))
except FileNotFoundError:
try:
print('You need to set up the config file correctly.')
except shutil.Error:
print(
'Something is wrong with the default config file or the config folder.'
)
time.sleep(4)
sys.exit()
ADMIN_ROLE = config['Credentials']['admin_role']
TEXT_CHANNEL = int(config['ProcessDisplay']['text_channel_id'])
PROCESSES = eval(config['ProcessDisplay']['processes'])
class ProcessDisplay(commands.Cog):
"""
The Cog for Process Display
"""
def __init__(self, client):
"""
:param client: the bot client parsed in from the main program
"""
self.started = False
self.client = client
self.inline = False
@commands.Cog.listener()
async def on_ready(self):
"""
Ran when bot is starting up and ready
Deletes messages from the bot in the TEXTCHANNEL
starts up find_processes method
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
started = True
print('ProcessDisplay Running')
@commands.command()
@commands.has_permissions(administrator=True)
async def toggle_inline(self, ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
need to make
:param direction:
:param process_name:
:return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == 'up':
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f'The process {process} is already being displayed')
elif name in PROCESSES.values():
await ctx.send(
f'The process name {name} is already being displayed')
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f'The process {name} has been added')
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(' '.join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f'The process {name} has been removed')
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(
) in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process), value=
'Online <:GreenTick:592083498534174721>', inline=
self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process), value=
'Offline <:RedCross:592082557961633877>', inline=
self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process), value=
'Admin Required <:OrangeUnknown:592082676891123722>',
inline=self.inline)
await msg.edit(content='', embed=new_embed)
def is_me(self, m):
"""
Checks if a messages author is the bot
:param m: tbh idk, maybe message?
:return:
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
:return: the message that says how many messages were deleted
"""
await channel.purge(limit=100, check=self.is_me)
@staticmethod
def update_processes_config():
"""
Updates the processes line in the config with the current PROCESSES
:return:
"""
config.set('ProcessDisplay', 'processes', str(PROCESSES))
with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:
config.write(configfile)
@staticmethod
def fix_emoji_escapes(text):
"""
Fixes emoji escapes to add the < back on
:param text: The text that needs to be checked for an escape
:return: the fixed text
"""
new_text = text.split(':')
for i in range(2, len(new_text)):
if '>' in new_text[i]:
new_text[i - 2] += '<'
return ':'.join(new_text)
def setup(client):
"""
Ran on setup of the Cog
:param client: The bot client
:return:
"""
client.add_cog(ProcessDisplay(client))
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
This is a Cog used to display processes/ programs running on the client to a discord text channel
Commented using reStructuredText (reST)
ToDo
create and use a database for multiple servers
"""
# Futures
# Built-in/Generic Imports
import os
import sys
import configparser
import shutil
import time
import codecs
# Libs
import discord
import psutil
from discord.ext import commands, tasks
# Own modules
__author__ = "Jack Draper"
__copyright__ = "Unofficial Copyright 2019, CyclopsBot"
__credits__ = ["Jack Draper"]
__license__ = "Developer"
__version__ = "0.0.4"
__maintainer__ = "Jack Draper"
__email__ = "thejaydwee@gmail.com"
__status__ = "Development"
# "Prototype", "Development", or "Production"
# Constants
CONFIG_PATH = "./configs/config.ini"
DEFAULT_EMBED = discord.Embed(
title=":desktop: Program Status",
colour=discord.Colour.blue()
)
# Checks for config file
if not os.path.exists("./configs/config.ini"):
print("No config file can be found in ./configs/.")
sys.exit("No config found.")
# Runs config file
config = configparser.ConfigParser()
try:
# config.read(os.path.abspath("./configs/config.ini"))
config.read_file(codecs.open(CONFIG_PATH, "r", "utf-8-sig"))
except FileNotFoundError:
try:
# shutil.copyfile("./configs/default_config.ini", "./configs/config.ini")
print("You need to set up the config file correctly.")
except shutil.Error:
print("Something is wrong with the default config file or the config folder.")
time.sleep(4)
sys.exit()
# Config Constants
ADMIN_ROLE = config["Credentials"]["admin_role"]
TEXT_CHANNEL = int(config["ProcessDisplay"]["text_channel_id"])
PROCESSES = eval(config["ProcessDisplay"]["processes"])
class ProcessDisplay(commands.Cog):
"""
The Cog for Process Display
"""
def __init__(self, client):
"""
:param client: the bot client parsed in from the main program
"""
self.started = False
self.client = client
self.inline = False
# Events
@commands.Cog.listener()
async def on_ready(self):
"""
Ran when bot is starting up and ready
Deletes messages from the bot in the TEXTCHANNEL
starts up find_processes method
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
started = True
print("ProcessDisplay Running")
# Commands
@commands.command()
@commands.has_permissions(administrator=True)
async def toggle_inline(self,ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
need to make
:param direction:
:param process_name:
:return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == "up":
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f"The process {process} is already being displayed")
elif name in PROCESSES.values():
await ctx.send(f"The process name {name} is already being displayed")
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f"The process {name} has been added")
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(" ".join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f"The process {name} has been removed")
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
elif proc.name() in ["java.exe", "javaw.exe"] and proc.cwd() in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process),
value="Online <:GreenTick:592083498534174721>", inline=self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process),
value="Offline <:RedCross:592082557961633877>", inline=self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process),
value="Admin Required <:OrangeUnknown:592082676891123722>", inline=self.inline)
await msg.edit(content="", embed=new_embed)
def is_me(self, m):
"""
Checks if a messages author is the bot
:param m: tbh idk, maybe message?
:return:
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
:return: the message that says how many messages were deleted
"""
await channel.purge(limit=100, check=self.is_me)
@staticmethod
def update_processes_config():
"""
Updates the processes line in the config with the current PROCESSES
:return:
"""
config.set("ProcessDisplay", "processes", str(PROCESSES))
with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:
config.write(configfile)
@staticmethod
def fix_emoji_escapes(text):
"""
Fixes emoji escapes to add the < back on
:param text: The text that needs to be checked for an escape
:return: the fixed text
"""
new_text = text.split(":")
for i in range(2, len(new_text)):
if ">" in new_text[i]:
new_text[i-2] += "<"
return ":".join(new_text)
def setup(client):
    """
    Extension entry point: register the ProcessDisplay cog on the bot.

    :param client: The bot client
    :return:
    """
    cog = ProcessDisplay(client)
    client.add_cog(cog)
|
flexible
|
{
"blob_id": "8dfef0a4525328be8dfb4723f0a168dc22eb5eb2",
"index": 520,
"step-1": "<mask token>\n\n\nclass ProcessDisplay(commands.Cog):\n <mask token>\n <mask token>\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n 
running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProcessDisplay(commands.Cog):\n <mask token>\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n 
running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n 
running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\n<mask token>\n",
"step-4": "<mask token>\n__author__ = 'Jack Draper'\n__copyright__ = 'Unofficial Copyright 2019, CyclopsBot'\n__credits__ = ['Jack Draper']\n__license__ = 'Developer'\n__version__ = '0.0.4'\n__maintainer__ = 'Jack Draper'\n__email__ = 'thejaydwee@gmail.com'\n__status__ = 'Development'\nCONFIG_PATH = './configs/config.ini'\nDEFAULT_EMBED = discord.Embed(title=':desktop: Program Status', colour=\n discord.Colour.blue())\nif not os.path.exists('./configs/config.ini'):\n print('No config file can be found in ./configs/.')\n sys.exit('No config found.')\nconfig = configparser.ConfigParser()\ntry:\n config.read_file(codecs.open(CONFIG_PATH, 'r', 'utf-8-sig'))\nexcept FileNotFoundError:\n try:\n print('You need to set up the config file correctly.')\n except shutil.Error:\n print(\n 'Something is wrong with the default config file or the config folder.'\n )\n time.sleep(4)\n sys.exit()\nADMIN_ROLE = config['Credentials']['admin_role']\nTEXT_CHANNEL = int(config['ProcessDisplay']['text_channel_id'])\nPROCESSES = eval(config['ProcessDisplay']['processes'])\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n 
\"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n 
new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\nThis is a Cog used to display processes/ programs running on the client to a discord text channel\n\nCommented using reStructuredText (reST)\n\nToDo\n create and use a database for multiple servers\n\"\"\"\n# Futures\n\n# Built-in/Generic Imports\nimport os\nimport sys\nimport configparser\nimport shutil\nimport time\nimport codecs\n\n# Libs\nimport discord\nimport psutil\nfrom discord.ext import commands, tasks\n\n# Own modules\n\n__author__ = \"Jack Draper\"\n__copyright__ = \"Unofficial Copyright 2019, CyclopsBot\"\n__credits__ = [\"Jack Draper\"]\n__license__ = \"Developer\"\n__version__ = \"0.0.4\"\n__maintainer__ = \"Jack Draper\"\n__email__ = \"thejaydwee@gmail.com\"\n__status__ = \"Development\"\n# \"Prototype\", \"Development\", or \"Production\"\n\n# Constants\nCONFIG_PATH = \"./configs/config.ini\"\nDEFAULT_EMBED = discord.Embed(\n title=\":desktop: Program Status\",\n colour=discord.Colour.blue()\n )\n\n\n# Checks for config file\nif not os.path.exists(\"./configs/config.ini\"):\n print(\"No config file can be found in ./configs/.\")\n sys.exit(\"No config found.\")\n# Runs config file\nconfig = configparser.ConfigParser()\ntry:\n # config.read(os.path.abspath(\"./configs/config.ini\"))\n config.read_file(codecs.open(CONFIG_PATH, \"r\", \"utf-8-sig\"))\nexcept FileNotFoundError:\n try:\n # shutil.copyfile(\"./configs/default_config.ini\", \"./configs/config.ini\")\n print(\"You need to set up the config file correctly.\")\n except shutil.Error:\n print(\"Something is wrong with the default config file or the config folder.\")\n time.sleep(4)\n sys.exit()\n\n# Config Constants\nADMIN_ROLE = config[\"Credentials\"][\"admin_role\"]\nTEXT_CHANNEL = int(config[\"ProcessDisplay\"][\"text_channel_id\"])\nPROCESSES = eval(config[\"ProcessDisplay\"][\"processes\"])\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot 
client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n # Events\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print(\"ProcessDisplay Running\")\n\n # Commands\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self,ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == \"up\":\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f\"The process {process} is already being displayed\")\n elif name in PROCESSES.values():\n await ctx.send(f\"The process name {name} is already being displayed\")\n\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been added\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(\" \".join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been removed\")\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n 
\"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n\n config.set(\"ProcessDisplay\", \"processes\", str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(\":\")\n for i in range(2, len(new_text)):\n if \">\" in new_text[i]:\n new_text[i-2] += \"<\"\n return \":\".join(new_text)\n\n\ndef 
setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"step-ids": [
2,
4,
6,
9,
11
]
}
|
[
2,
4,
6,
9,
11
] |
<|reserved_special_token_0|>
class CouponForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def clean(self):
cleaned_type = self.cleaned_data.get('type')
real_type = CouponType.objects.filter(name=cleaned_type).first()
if not real_type:
raise forms.ValidationError(_(
'Sorry, that coupon type cannot be found.'))
else:
self.cleaned_data['type'] = real_type
return self.cleaned_data
class Meta:
model = Coupon
fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CouponForm(forms.ModelForm):
    """
    Form for creating or editing a Coupon.

    ``type`` is submitted as a choice string and resolved to the matching
    CouponType instance during validation.
    """

    name = forms.CharField(max_length=64, label=_('Name'), required=True)
    type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
    data = forms.CharField(max_length=64, required=False)
    style = forms.CharField(max_length=64, required=False)
    valid_from = forms.DateTimeField(
        widget=forms.DateTimeInput(attrs={'class': 'datepicker'},
                                   format='%Y-%m-%d %H:%M'),
        input_formats=['%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)
    valid_until = forms.DateTimeField(
        widget=forms.DateTimeInput(attrs={'class': 'datepicker'},
                                   format='%Y-%m-%d %H:%M'),
        input_formats=['%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)

    def clean(self):
        """
        Resolve the submitted ``type`` choice to a CouponType instance.

        :raises forms.ValidationError: if no CouponType with that name exists
        :return: the cleaned data with ``type`` replaced by the model instance
        """
        # Call the ModelForm base clean() first; skipping it would bypass
        # the model's uniqueness validation (the original override did not).
        cleaned_data = super().clean()
        cleaned_type = cleaned_data.get('type')
        real_type = CouponType.objects.filter(name=cleaned_type).first()
        if not real_type:
            raise forms.ValidationError(_(
                'Sorry, that coupon type cannot be found.'))
        cleaned_data['type'] = real_type
        return cleaned_data

    class Meta:
        model = Coupon
        fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
COUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',
_('questionnaire'))
class CouponForm(forms.ModelForm):
name = forms.CharField(max_length=64, label=_('Name'), required=True)
type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
data = forms.CharField(max_length=64, required=False)
style = forms.CharField(max_length=64, required=False)
valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={
'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[
'%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)
valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={
'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[
'%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)
def clean(self):
cleaned_type = self.cleaned_data.get('type')
real_type = CouponType.objects.filter(name=cleaned_type).first()
if not real_type:
raise forms.ValidationError(_(
'Sorry, that coupon type cannot be found.'))
else:
self.cleaned_data['type'] = real_type
return self.cleaned_data
class Meta:
model = Coupon
fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'
<|reserved_special_token_1|>
from django import forms
from django.utils.translation import ugettext_lazy as _
from apps.qa.models.coupon import Coupon
from apps.qa.models.coupon_type import CouponType
COUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',
_('questionnaire'))
class CouponForm(forms.ModelForm):
name = forms.CharField(max_length=64, label=_('Name'), required=True)
type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
data = forms.CharField(max_length=64, required=False)
style = forms.CharField(max_length=64, required=False)
valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={
'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[
'%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)
valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={
'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[
'%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)
def clean(self):
cleaned_type = self.cleaned_data.get('type')
real_type = CouponType.objects.filter(name=cleaned_type).first()
if not real_type:
raise forms.ValidationError(_(
'Sorry, that coupon type cannot be found.'))
else:
self.cleaned_data['type'] = real_type
return self.cleaned_data
class Meta:
model = Coupon
fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
# import models
from apps.qa.models.coupon import Coupon
from apps.qa.models.coupon_type import CouponType
COUPONTYPE_CHOICES = (
('text', _("text")),
('url', _("url")),
('questionnaire', _("questionnaire")),
)
class CouponForm(forms.ModelForm):
name = forms.CharField(max_length=64, label=_("Name"), required=True)
type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
# these fields are JSON containers populated by custom BL
data = forms.CharField(max_length=64, required=False)
style = forms.CharField(max_length=64, required=False)
valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),
input_formats=['%Y-%m-%d %H:%M', ],
label=_("Valid From"),
required=True)
valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),
input_formats=['%Y-%m-%d %H:%M', ],
label=_("Valid Until"),
required=True)
def clean(self):
cleaned_type = self.cleaned_data.get('type')
real_type = CouponType.objects.filter(name=cleaned_type).first()
if not real_type:
raise forms.ValidationError(_("Sorry, that coupon type cannot be found."))
else:
self.cleaned_data['type'] = real_type
return self.cleaned_data
class Meta:
model = Coupon
fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')
|
flexible
|
{
"blob_id": "a0f83f0a2c6ddaa2fc641bd4fa48a6f50fd1d978",
"index": 1755,
"step-1": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-2": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-3": "<mask token>\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-4": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\n# import models\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\n\n\nCOUPONTYPE_CHOICES = (\n ('text', _(\"text\")),\n ('url', _(\"url\")),\n ('questionnaire', _(\"questionnaire\")),\n)\n\nclass CouponForm(forms.ModelForm):\n\n name = forms.CharField(max_length=64, label=_(\"Name\"), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n\n # these fields are JSON containers populated by custom BL\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid From\"),\n required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid Until\"),\n required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\"Sorry, that coupon type cannot be found.\"))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n class Meta:\n model = Coupon\n fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# class User:
# def __init__(self, name_parameter, email_parameter):
# self.nameofPerson = name_parameter
# self.emailofPerson = email_parameter
# self.account_balance = 0
# def depositMoney(self, amount);
# self.account_balance += amount
# return self
# def transferMoney(self, otherUser, amount):
# self.account_balance -= 5
# otherUser.account_balance += 5
# return self
# To allow user1, user2 or user3 overdraw their account
# def withdrawMoney_overdraw(self, amount):
# self.account_balance -= amount
# To not allow user1, user2, user3 overdraw their account
# def withdrawMoney_no_overdraw(self, amount):
# if self.account_balance > amount:
# self.account_balance -= amount
# else:
# print("insufficient funds")
# user1 = User("Ben", "benjamin@yahoo.com")
# user2 = User("Tom", "tommy@yahoo.com")
# user3 = User("Sarah", "sarah@yahoo.com")
# print(user1.nameofPerson)
# prints the name of the user1
# print(user2.emailfPerson)
# prints the email of the user2
# print(user1.account_balance)
# prints the account balance of the user3 which in this case is 0 by default according to the class User
# user1.depositMoney(50)
# print(user1.account_balance)
# prints the account balance of user1 which by default is 0 and then adds the function depositMoney which is giving an arguemnt of 50 (0 + 50 / account_balance + depositMoney) The output is ($50)
# user1.transferMoney(user 2, 5)
# print(user2.account_balance)
# print(user1.account_balance)
# prints user1 account balance which is now 50 then transfers money to user2 (50 - 5) which is now 5 to be added to the default account balance of 0 (0 + 5 / account_balance + transferMoney from user1)
# Also user1 account_balance (50 - 5) which is now 45 ($45)
# user1.depositMoney(50).depositMoney(30).transferMoney(user2, 5)
# print(user1.account_balance)
# prints user1 account banlace (50 + 30) which is 80 ($80), assuming user1 depositedMoney twice. we use "return self" at the end of the declared functions to add a "chain method" of repeating a chain of function or various types of functions, i.e repeating a particular function for user1 as many times as possible or even adding other functions to modify the final account balance for user 1)
# The final output for account balance user1 will be (80 - 50) which is 75 ($75) because we transfered money 5 (80 - 5) to user2 at the end in the "chain mathod of functions" for user1. This will only work since we added "return self"and this means (updating all the chain methods to the very last function of command in the declared function which in this case we tranfered 5 from user1 to user2)
# user1.withdrawMoney_overdraw(100)
# print(user1.account_balance)
# prints user1 current account balance which is currently 75 and then withdraws 100 which means (75 - 100) which is -25.
# the new user1 account balance is ( -25 which is -$25)
# The above assuming user1 is allowed to overdraw their account
# user1.withdrawMoney_no_overdraw(100)
# print(user1.account_balance)
# prints "insufficient funds" for user1 since user1 current account balance which is currently 75 and then wants to withdraw 100 which means (75 - 100) but is not allowed to because user1 still needs an additional 25 to fulfil the withdrawMoney function of 100. we give a conditional statement in our def withdrawMoney_no_overdraw above saying if user1 account balance is greater than amount allow user1 to withdraw money if not do not allow user1 to redraw money instead give "insuffiecient funds" (if 75 is greater than 100 which in this case is false go to the else statement which is "insufficient funds")
# The above assuming user1 is not allowed to overdraw their account if account balance for user1 is not greater than the withdraw amount and then user1 will get a message "insufficient funds"
|
normal
|
{
"blob_id": "c69dcffc06146af610a7976e522b6e35cabde1aa",
"index": 3050,
"step-1": "# class User:\n# def __init__(self, name_parameter, email_parameter):\n# self.nameofPerson = name_parameter\n# self.emailofPerson = email_parameter\n# self.account_balance = 0\n\n# def depositMoney(self, amount);\n# self.account_balance += amount\n# return self\n\n# def transferMoney(self, otherUser, amount):\n# self.account_balance -= 5\n# otherUser.account_balance += 5\n# return self\n\n# To allow user1, user2 or user3 overdraw their account\n # def withdrawMoney_overdraw(self, amount):\n # self.account_balance -= amount\n\n# To not allow user1, user2, user3 overdraw their account\n# def withdrawMoney_no_overdraw(self, amount):\n# if self.account_balance > amount:\n# self.account_balance -= amount\n# else:\n# print(\"insufficient funds\")\n\n\n# user1 = User(\"Ben\", \"benjamin@yahoo.com\")\n# user2 = User(\"Tom\", \"tommy@yahoo.com\")\n# user3 = User(\"Sarah\", \"sarah@yahoo.com\")\n\n# print(user1.nameofPerson)\n# prints the name of the user1\n\n# print(user2.emailfPerson)\n# prints the email of the user2\n\n# print(user1.account_balance)\n# prints the account balance of the user3 which in this case is 0 by default according to the class User\n\n# user1.depositMoney(50)\n# print(user1.account_balance)\n# prints the account balance of user1 which by default is 0 and then adds the function depositMoney which is giving an arguemnt of 50 (0 + 50 / account_balance + depositMoney) The output is ($50)\n\n# user1.transferMoney(user 2, 5)\n# print(user2.account_balance)\n# print(user1.account_balance)\n# prints user1 account balance which is now 50 then transfers money to user2 (50 - 5) which is now 5 to be added to the default account balance of 0 (0 + 5 / account_balance + transferMoney from user1)\n# Also user1 account_balance (50 - 5) which is now 45 ($45)\n\n# user1.depositMoney(50).depositMoney(30).transferMoney(user2, 5)\n# print(user1.account_balance)\n# prints user1 account banlace (50 + 30) which is 80 ($80), assuming user1 depositedMoney twice. 
we use \"return self\" at the end of the declared functions to add a \"chain method\" of repeating a chain of function or various types of functions, i.e repeating a particular function for user1 as many times as possible or even adding other functions to modify the final account balance for user 1)\n# The final output for account balance user1 will be (80 - 50) which is 75 ($75) because we transfered money 5 (80 - 5) to user2 at the end in the \"chain mathod of functions\" for user1. This will only work since we added \"return self\"and this means (updating all the chain methods to the very last function of command in the declared function which in this case we tranfered 5 from user1 to user2)\n\n# user1.withdrawMoney_overdraw(100)\n# print(user1.account_balance)\n# prints user1 current account balance which is currently 75 and then withdraws 100 which means (75 - 100) which is -25.\n# the new user1 account balance is ( -25 which is -$25)\n# The above assuming user1 is allowed to overdraw their account\n\n# user1.withdrawMoney_no_overdraw(100)\n# print(user1.account_balance)\n# prints \"insufficient funds\" for user1 since user1 current account balance which is currently 75 and then wants to withdraw 100 which means (75 - 100) but is not allowed to because user1 still needs an additional 25 to fulfil the withdrawMoney function of 100. we give a conditional statement in our def withdrawMoney_no_overdraw above saying if user1 account balance is greater than amount allow user1 to withdraw money if not do not allow user1 to redraw money instead give \"insuffiecient funds\" (if 75 is greater than 100 which in this case is false go to the else statement which is \"insufficient funds\")\n# The above assuming user1 is not allowed to overdraw their account if account balance for user1 is not greater than the withdraw amount and then user1 will get a message \"insufficient funds\"\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
class RatesAdmin(admin.ModelAdmin):
list_filter = ['c_code_id', 'upd_id']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CurrencyAdmin(admin.ModelAdmin):
pass
class UpdAdmin(admin.ModelAdmin):
pass
class RatesAdmin(admin.ModelAdmin):
list_filter = ['c_code_id', 'upd_id']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CurrencyAdmin(admin.ModelAdmin):
pass
class UpdAdmin(admin.ModelAdmin):
pass
class RatesAdmin(admin.ModelAdmin):
list_filter = ['c_code_id', 'upd_id']
admin.site.register(Currency, CurrencyAdmin)
admin.site.register(UpdateInfo, UpdAdmin)
admin.site.register(Rates, RatesAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from xchanger.models import Currency, Rates, UpdateInfo
class CurrencyAdmin(admin.ModelAdmin):
pass
class UpdAdmin(admin.ModelAdmin):
pass
class RatesAdmin(admin.ModelAdmin):
list_filter = ['c_code_id', 'upd_id']
admin.site.register(Currency, CurrencyAdmin)
admin.site.register(UpdateInfo, UpdAdmin)
admin.site.register(Rates, RatesAdmin)
|
flexible
|
{
"blob_id": "20ccdd319bfbbb4f17e8518eb60d125112c05d8e",
"index": 6828,
"step-1": "<mask token>\n\n\nclass RatesAdmin(admin.ModelAdmin):\n list_filter = ['c_code_id', 'upd_id']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n pass\n\n\nclass UpdAdmin(admin.ModelAdmin):\n pass\n\n\nclass RatesAdmin(admin.ModelAdmin):\n list_filter = ['c_code_id', 'upd_id']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n pass\n\n\nclass UpdAdmin(admin.ModelAdmin):\n pass\n\n\nclass RatesAdmin(admin.ModelAdmin):\n list_filter = ['c_code_id', 'upd_id']\n\n\nadmin.site.register(Currency, CurrencyAdmin)\nadmin.site.register(UpdateInfo, UpdAdmin)\nadmin.site.register(Rates, RatesAdmin)\n",
"step-4": "from django.contrib import admin\nfrom xchanger.models import Currency, Rates, UpdateInfo\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n pass\n\n\nclass UpdAdmin(admin.ModelAdmin):\n pass\n\n\nclass RatesAdmin(admin.ModelAdmin):\n list_filter = ['c_code_id', 'upd_id']\n\n\nadmin.site.register(Currency, CurrencyAdmin)\nadmin.site.register(UpdateInfo, UpdAdmin)\nadmin.site.register(Rates, RatesAdmin)\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox
class ChoiceTargetNumbers(QWidget):
"""Виджет с выбором номеров целей"""
def __init__(self, parent=None) -> None:
QWidget.__init__(self, parent)
# Нужные компоненты
label = QLabel(text="Выберите номера целей:")
self.first_target_number_combo_box = QComboBox()
self.first_target_number_combo_box.addItems(["1", "2", "3"])
self.second_target_number_combo_box = QComboBox()
self.second_target_number_combo_box.addItems(["1", "2", "3"])
# Основной контейнер
layout = QHBoxLayout(self)
layout.addWidget(label)
layout.addWidget(self.first_target_number_combo_box)
layout.addWidget(self.second_target_number_combo_box)
|
normal
|
{
"blob_id": "291cd789ac3ab7b794be8feafe0f608ad0c081d7",
"index": 9674,
"step-1": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n <mask token>\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-3": "<mask token>\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-4": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n\n def __init__(self, parent=None) ->None:\n QWidget.__init__(self, parent)\n label = QLabel(text='Выберите номера целей:')\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems(['1', '2', '3'])\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems(['1', '2', '3'])\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-5": "from PyQt5.QtWidgets import QWidget, QHBoxLayout, QLabel, QComboBox\n\n\nclass ChoiceTargetNumbers(QWidget):\n \"\"\"Виджет с выбором номеров целей\"\"\"\n def __init__(self, parent=None) -> None:\n QWidget.__init__(self, parent)\n\n # Нужные компоненты\n label = QLabel(text=\"Выберите номера целей:\")\n\n self.first_target_number_combo_box = QComboBox()\n self.first_target_number_combo_box.addItems([\"1\", \"2\", \"3\"])\n\n self.second_target_number_combo_box = QComboBox()\n self.second_target_number_combo_box.addItems([\"1\", \"2\", \"3\"])\n\n # Основной контейнер\n layout = QHBoxLayout(self)\n layout.addWidget(label)\n layout.addWidget(self.first_target_number_combo_box)\n layout.addWidget(self.second_target_number_combo_box)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def get_data():
class_list = []
with open('counts.tsv') as fd:
for line in fd.read().splitlines():
class_data = line.split('\t')
class_list.append(class_data)
return class_list
def get_fall_2016():
directory = get_data()
fall_2016_list = []
for n in directory:
if n[0] == '2016' and n[1] == 'fall':
fall_2016_list.append(n)
return fall_2016_list
def get_fall_2016_core(core):
directory = get_fall_2016()
core_satisfied_list = []
for n in directory:
core_possible = n[9].split(';')
if core in core_possible:
core_satisfied_list.append(n)
return core_satisfied_list
<|reserved_special_token_0|>
@app.route('/CPAF')
def display_random_CPAF():
courses = get_fall_2016_core('CPAF')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPAS')
def display_random_CPAS():
courses = get_fall_2016_core('CPAS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
<|reserved_special_token_0|>
@app.route('/CFAP')
def display_random_CFAP():
courses = get_fall_2016_core('CFAP')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
<|reserved_special_token_0|>
@app.route('/CPLS')
def display_random_CPLS():
courses = get_fall_2016_core('CPLS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPLA')
def display_random_CPLA():
courses = get_fall_2016_core('CPLA')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPMS')
def display_random_CPMS():
courses = get_fall_2016_core('CPMS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPPE')
def display_random_CPPE():
courses = get_fall_2016_core('CPPE')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPRF')
def display_random_CPRF():
courses = get_fall_2016_core('CPRF')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPUS')
def display_random_CPUS():
courses = get_fall_2016_core('CPUS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPUD')
def display_random_CPUD():
courses = get_fall_2016_core('CPUD')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CUSP')
def display_random_CUSP():
courses = get_fall_2016_core('CUSP')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/images/<file>')
def get_image(file):
return send_from_directory('images', file)
@app.route('/css/<file>')
def get_css(file):
return send_from_directory('css', file)
@app.route('/js/<file>')
def get_js(file):
return send_from_directory('js', file)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data():
class_list = []
with open('counts.tsv') as fd:
for line in fd.read().splitlines():
class_data = line.split('\t')
class_list.append(class_data)
return class_list
def get_fall_2016():
directory = get_data()
fall_2016_list = []
for n in directory:
if n[0] == '2016' and n[1] == 'fall':
fall_2016_list.append(n)
return fall_2016_list
def get_fall_2016_core(core):
directory = get_fall_2016()
core_satisfied_list = []
for n in directory:
core_possible = n[9].split(';')
if core in core_possible:
core_satisfied_list.append(n)
return core_satisfied_list
<|reserved_special_token_0|>
@app.route('/CPAF')
def display_random_CPAF():
courses = get_fall_2016_core('CPAF')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPAS')
def display_random_CPAS():
courses = get_fall_2016_core('CPAS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if course3[5] != course1[5] and course3[5] != course2[5]:
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
<|reserved_special_token_0|>
def _three_distinct_courses(core):
    """Pick three random fall-2016 courses satisfying *core*.

    Up to 10 retries per slot try to avoid repeating a course title
    (column 5); if the pool has too few distinct titles a duplicate may
    remain — the same best-effort behavior as the original per-route code.
    """
    courses = get_fall_2016_core(core)
    picks = [random.choice(courses)]
    for _ in range(2):
        for _attempt in range(10):
            candidate = random.choice(courses)
            if all(candidate[5] != p[5] for p in picks):
                break
        picks.append(candidate)
    return picks
@app.route('/CFAP')
def display_random_CFAP():
    """Show three randomly chosen fall 2016 courses satisfying the CFAP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CFAP'))
@app.route('/CPGC')
def display_random_CPGC():
    """Show three randomly chosen fall 2016 courses satisfying the CPGC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPGC'))
@app.route('/CPIC')
def display_random_CPIC():
    """Show three randomly chosen fall 2016 courses satisfying the CPIC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPIC'))
@app.route('/CPLS')
def display_random_CPLS():
    """Show three randomly chosen fall 2016 courses satisfying the CPLS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLS'))
@app.route('/CPLA')
def display_random_CPLA():
    """Show three randomly chosen fall 2016 courses satisfying the CPLA core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLA'))
@app.route('/CPMS')
def display_random_CPMS():
    """Show three randomly chosen fall 2016 courses satisfying the CPMS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPMS'))
@app.route('/CPPE')
def display_random_CPPE():
    """Show three randomly chosen fall 2016 courses satisfying the CPPE core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPPE'))
@app.route('/CPRF')
def display_random_CPRF():
    """Show three randomly chosen fall 2016 courses satisfying the CPRF core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPRF'))
@app.route('/CPUS')
def display_random_CPUS():
    """Show three randomly chosen fall 2016 courses satisfying the CPUS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUS'))
@app.route('/CPUD')
def display_random_CPUD():
    """Show three randomly chosen fall 2016 courses satisfying the CPUD core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUD'))
@app.route('/CUSP')
def display_random_CUSP():
    """Show three randomly chosen fall 2016 courses satisfying the CUSP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CUSP'))
@app.route('/images/<file>')
def get_image(file):
    """Serve *file* from the static images/ directory."""
    return send_from_directory('images', file)
@app.route('/css/<file>')
def get_css(file):
    """Serve *file* from the static css/ directory."""
    return send_from_directory('css', file)
@app.route('/js/<file>')
def get_js(file):
    """Serve *file* from the static js/ directory."""
    return send_from_directory('js', file)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data():
    """Load every course row from counts.tsv as a list of column strings."""
    with open('counts.tsv') as fd:
        return [line.split('\t') for line in fd.read().splitlines()]
def get_fall_2016():
    """Return only the rows whose term is fall 2016 (columns 0 and 1)."""
    return [row for row in get_data() if row[0] == '2016' and row[1] == 'fall']
def get_fall_2016_core(core):
    """Return fall 2016 rows whose core field (column 9, ';'-separated) includes *core*."""
    return [row for row in get_fall_2016() if core in row[9].split(';')]
@app.route('/')
def display_full_courses():
    """Render the landing page listing every course in the data file."""
    return render_template('base.html', courses=get_data())
def _three_distinct_courses(core):
    """Pick three random fall-2016 courses satisfying *core*.

    Up to 10 retries per slot try to avoid repeating a course title
    (column 5); if the pool has too few distinct titles a duplicate may
    remain — the same best-effort behavior as the original per-route code.
    """
    courses = get_fall_2016_core(core)
    picks = [random.choice(courses)]
    for _ in range(2):
        for _attempt in range(10):
            candidate = random.choice(courses)
            if all(candidate[5] != p[5] for p in picks):
                break
        picks.append(candidate)
    return picks
@app.route('/CPAF')
def display_random_CPAF():
    """Show three randomly chosen fall 2016 courses satisfying the CPAF core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAF'))
@app.route('/CPAS')
def display_random_CPAS():
    """Show three randomly chosen fall 2016 courses satisfying the CPAS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAS'))
@app.route('/CPEU')
def display_random_CPEU():
    """Show three randomly chosen fall 2016 courses satisfying the CPEU core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPEU'))
@app.route('/CPFA')
def display_random_CPFA():
    """Show three randomly chosen fall 2016 courses satisfying the CPFA core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPFA'))
@app.route('/CPAP')
def display_random_CPAP():
    """Show three randomly chosen fall 2016 courses satisfying the CPAP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAP'))
@app.route('/CFAP')
def display_random_CFAP():
    """Show three randomly chosen fall 2016 courses satisfying the CFAP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CFAP'))
@app.route('/CPGC')
def display_random_CPGC():
    """Show three randomly chosen fall 2016 courses satisfying the CPGC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPGC'))
@app.route('/CPIC')
def display_random_CPIC():
    """Show three randomly chosen fall 2016 courses satisfying the CPIC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPIC'))
@app.route('/CPLS')
def display_random_CPLS():
    """Show three randomly chosen fall 2016 courses satisfying the CPLS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLS'))
@app.route('/CPLA')
def display_random_CPLA():
    """Show three randomly chosen fall 2016 courses satisfying the CPLA core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLA'))
@app.route('/CPMS')
def display_random_CPMS():
    """Show three randomly chosen fall 2016 courses satisfying the CPMS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPMS'))
@app.route('/CPPE')
def display_random_CPPE():
    """Show three randomly chosen fall 2016 courses satisfying the CPPE core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPPE'))
@app.route('/CPRF')
def display_random_CPRF():
    """Show three randomly chosen fall 2016 courses satisfying the CPRF core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPRF'))
@app.route('/CPUS')
def display_random_CPUS():
    """Show three randomly chosen fall 2016 courses satisfying the CPUS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUS'))
@app.route('/CPUD')
def display_random_CPUD():
    """Show three randomly chosen fall 2016 courses satisfying the CPUD core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUD'))
@app.route('/CUSP')
def display_random_CUSP():
    """Show three randomly chosen fall 2016 courses satisfying the CUSP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CUSP'))
@app.route('/images/<file>')
def get_image(file):
    """Serve *file* from the static images/ directory."""
    return send_from_directory('images', file)
@app.route('/css/<file>')
def get_css(file):
    """Serve *file* from the static css/ directory."""
    return send_from_directory('css', file)
@app.route('/js/<file>')
def get_js(file):
    """Serve *file* from the static js/ directory."""
    return send_from_directory('js', file)
if __name__ == '__main__':
    # debug=True enables the reloader/debugger; not suitable for production.
    app.run(debug=True)
<|reserved_special_token_1|>
# NOTE(review): chdir/dirname/realpath are imported but never used below —
# presumably intended to chdir into the script's directory before opening
# counts.tsv; confirm before removing.
from os import chdir
from os.path import dirname, realpath
import random
from flask import Flask, render_template, send_from_directory
# Flask application serving course listings read from counts.tsv.
app = Flask(__name__)
def get_data():
    """Load every course row from counts.tsv as a list of column strings."""
    with open('counts.tsv') as fd:
        return [line.split('\t') for line in fd.read().splitlines()]
def get_fall_2016():
    """Return only the rows whose term is fall 2016 (columns 0 and 1)."""
    return [row for row in get_data() if row[0] == '2016' and row[1] == 'fall']
def get_fall_2016_core(core):
    """Return fall 2016 rows whose core field (column 9, ';'-separated) includes *core*."""
    return [row for row in get_fall_2016() if core in row[9].split(';')]
@app.route('/')
def display_full_courses():
    """Render the landing page listing every course in the data file."""
    return render_template('base.html', courses=get_data())
def _three_distinct_courses(core):
    """Pick three random fall-2016 courses satisfying *core*.

    Up to 10 retries per slot try to avoid repeating a course title
    (column 5); if the pool has too few distinct titles a duplicate may
    remain — the same best-effort behavior as the original per-route code.
    """
    courses = get_fall_2016_core(core)
    picks = [random.choice(courses)]
    for _ in range(2):
        for _attempt in range(10):
            candidate = random.choice(courses)
            if all(candidate[5] != p[5] for p in picks):
                break
        picks.append(candidate)
    return picks
@app.route('/CPAF')
def display_random_CPAF():
    """Show three randomly chosen fall 2016 courses satisfying the CPAF core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAF'))
@app.route('/CPAS')
def display_random_CPAS():
    """Show three randomly chosen fall 2016 courses satisfying the CPAS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAS'))
@app.route('/CPEU')
def display_random_CPEU():
    """Show three randomly chosen fall 2016 courses satisfying the CPEU core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPEU'))
@app.route('/CPFA')
def display_random_CPFA():
    """Show three randomly chosen fall 2016 courses satisfying the CPFA core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPFA'))
@app.route('/CPAP')
def display_random_CPAP():
    """Show three randomly chosen fall 2016 courses satisfying the CPAP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPAP'))
@app.route('/CFAP')
def display_random_CFAP():
    """Show three randomly chosen fall 2016 courses satisfying the CFAP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CFAP'))
@app.route('/CPGC')
def display_random_CPGC():
    """Show three randomly chosen fall 2016 courses satisfying the CPGC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPGC'))
@app.route('/CPIC')
def display_random_CPIC():
    """Show three randomly chosen fall 2016 courses satisfying the CPIC core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPIC'))
@app.route('/CPLS')
def display_random_CPLS():
    """Show three randomly chosen fall 2016 courses satisfying the CPLS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLS'))
@app.route('/CPLA')
def display_random_CPLA():
    """Show three randomly chosen fall 2016 courses satisfying the CPLA core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPLA'))
@app.route('/CPMS')
def display_random_CPMS():
    """Show three randomly chosen fall 2016 courses satisfying the CPMS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPMS'))
@app.route('/CPPE')
def display_random_CPPE():
    """Show three randomly chosen fall 2016 courses satisfying the CPPE core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPPE'))
@app.route('/CPRF')
def display_random_CPRF():
    """Show three randomly chosen fall 2016 courses satisfying the CPRF core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPRF'))
@app.route('/CPUS')
def display_random_CPUS():
    """Show three randomly chosen fall 2016 courses satisfying the CPUS core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUS'))
@app.route('/CPUD')
def display_random_CPUD():
    """Show three randomly chosen fall 2016 courses satisfying the CPUD core."""
    return render_template('courses.html', courses=_three_distinct_courses('CPUD'))
@app.route('/CUSP')
def display_random_CUSP():
    """Show three randomly chosen fall 2016 courses satisfying the CUSP core."""
    return render_template('courses.html', courses=_three_distinct_courses('CUSP'))
@app.route('/images/<file>')
def get_image(file):
    """Serve *file* from the static images/ directory."""
    return send_from_directory('images', file)
@app.route('/css/<file>')
def get_css(file):
    """Serve *file* from the static css/ directory."""
    return send_from_directory('css', file)
@app.route('/js/<file>')
def get_js(file):
    """Serve *file* from the static js/ directory."""
    return send_from_directory('js', file)
if __name__ == '__main__':
    # debug=True enables the reloader/debugger; not suitable for production.
    app.run(debug=True)
<|reserved_special_token_1|>
# NOTE(review): chdir/dirname/realpath are imported but never used below —
# presumably intended to chdir into the script's directory before opening
# counts.tsv; confirm before removing.
from os import chdir
from os.path import dirname, realpath
import random
from flask import Flask, render_template, send_from_directory
# Flask application serving course listings read from counts.tsv.
app = Flask(__name__)
def get_data():
    """Load every course row from counts.tsv as a list of column strings."""
    with open('counts.tsv') as fd:
        return [line.split('\t') for line in fd.read().splitlines()]
def get_fall_2016():
    """Return only the rows whose term is fall 2016 (columns 0 and 1)."""
    return [row for row in get_data() if row[0] == '2016' and row[1] == 'fall']
def get_fall_2016_core(core):
    """Return fall 2016 rows satisfying a core requirement.

    Column 9 holds a ';'-separated list of core codes; a row qualifies when
    *core* is one of them.
    """
    return [row for row in get_fall_2016() if core in row[9].split(';')]
@app.route('/')
def display_full_courses():
    """Render the landing page listing every course in the data file."""
    return render_template('base.html', courses=get_data())
# All of the remaining functions display website with 3 (different) random classes that satisfy specified core requirement
@app.route('/CPAF')
def display_random_CPAF():
courses = get_fall_2016_core('CPAF')
list_of_three = []
course1 = random.choice(courses)
for i in range(10): # ensures second course is different from first one
course2 = random.choice(courses)
if course2[5] != course1[5]:
break # if course titles are different, keep course 2. if not, try again
for i in range(10): # ensures third course is different from first and second
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break # if course titles are different, keep course 3. if not, try again
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPAS')
def display_random_CPAS():
courses = get_fall_2016_core('CPAS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPEU')
def display_random_CPEU():
courses = get_fall_2016_core('CPEU')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPFA')
def display_random_CPFA():
courses = get_fall_2016_core('CPFA')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPAP')
def display_random_CPAP():
courses = get_fall_2016_core('CPAP')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CFAP')
def display_random_CFAP():
courses = get_fall_2016_core('CFAP')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPGC')
def display_random_CPGC():
courses = get_fall_2016_core('CPGC')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPIC')
def display_random_CPIC():
courses = get_fall_2016_core('CPIC')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPLS')
def display_random_CPLS():
courses = get_fall_2016_core('CPLS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPLA')
def display_random_CPLA():
courses = get_fall_2016_core('CPLA')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPMS')
def display_random_CPMS():
courses = get_fall_2016_core('CPMS')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPPE')
def display_random_CPPE():
courses = get_fall_2016_core('CPPE')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPRF')
def display_random_CPRF():
courses = get_fall_2016_core('CPRF')
list_of_three = []
course1 = random.choice(courses)
for i in range(10):
course2 = random.choice(courses)
if course2[5] != course1[5]:
break
for i in range(10):
course3 = random.choice(courses)
if (course3[5] != course1[5]) and (course3[5] != course2[5]):
break
list_of_three.append(course1)
list_of_three.append(course2)
list_of_three.append(course3)
return render_template('courses.html', courses=list_of_three)
@app.route('/CPUS')
def display_random_CPUS():
    """Render three randomly chosen Fall 2016 courses satisfying the CPUS core.

    Courses are drawn without replacement, preferring three with distinct
    values in field 5 (presumably department/instructor -- TODO confirm
    against the counts.tsv schema).  If fewer than three distinct values
    exist, the remaining slots are filled with other courses from the pool.
    """
    courses = get_fall_2016_core('CPUS')
    # Shuffle once, then scan: this guarantees distinct field-5 values
    # whenever the pool allows it, never repeats the same course, and is
    # safe on an empty pool (renders an empty list instead of crashing).
    pool = random.sample(courses, len(courses))
    chosen = []
    seen = set()
    for course in pool:
        if course[5] not in seen:
            chosen.append(course)
            seen.add(course[5])
            if len(chosen) == 3:
                break
    # Fallback: fewer than three distinct field-5 values -- fill the
    # remaining slots with any other courses so we still show up to three.
    if len(chosen) < 3:
        for course in pool:
            if course not in chosen:
                chosen.append(course)
                if len(chosen) == 3:
                    break
    return render_template('courses.html', courses=chosen)
@app.route('/CPUD')
def display_random_CPUD():
    """Render three randomly chosen Fall 2016 courses satisfying the CPUD core.

    Courses are drawn without replacement, preferring three with distinct
    values in field 5 (presumably department/instructor -- TODO confirm
    against the counts.tsv schema).  If fewer than three distinct values
    exist, the remaining slots are filled with other courses from the pool.
    """
    courses = get_fall_2016_core('CPUD')
    # Shuffle once, then scan: this guarantees distinct field-5 values
    # whenever the pool allows it, never repeats the same course, and is
    # safe on an empty pool (renders an empty list instead of crashing).
    pool = random.sample(courses, len(courses))
    chosen = []
    seen = set()
    for course in pool:
        if course[5] not in seen:
            chosen.append(course)
            seen.add(course[5])
            if len(chosen) == 3:
                break
    # Fallback: fewer than three distinct field-5 values -- fill the
    # remaining slots with any other courses so we still show up to three.
    if len(chosen) < 3:
        for course in pool:
            if course not in chosen:
                chosen.append(course)
                if len(chosen) == 3:
                    break
    return render_template('courses.html', courses=chosen)
@app.route('/CUSP')
def display_random_CUSP():
    """Render three randomly chosen Fall 2016 courses satisfying the CUSP core.

    Courses are drawn without replacement, preferring three with distinct
    values in field 5 (presumably department/instructor -- TODO confirm
    against the counts.tsv schema).  If fewer than three distinct values
    exist, the remaining slots are filled with other courses from the pool.
    """
    courses = get_fall_2016_core('CUSP')
    # Shuffle once, then scan: this guarantees distinct field-5 values
    # whenever the pool allows it, never repeats the same course, and is
    # safe on an empty pool (renders an empty list instead of crashing).
    pool = random.sample(courses, len(courses))
    chosen = []
    seen = set()
    for course in pool:
        if course[5] not in seen:
            chosen.append(course)
            seen.add(course[5])
            if len(chosen) == 3:
                break
    # Fallback: fewer than three distinct field-5 values -- fill the
    # remaining slots with any other courses so we still show up to three.
    if len(chosen) < 3:
        for course in pool:
            if course not in chosen:
                chosen.append(course)
                if len(chosen) == 3:
                    break
    return render_template('courses.html', courses=chosen)
# The functions below let you access files in the css, js, and images folders.
# You should not change them unless you know what you are doing.
@app.route('/images/<file>')
def get_image(file):
    """Serve a static asset from the images/ folder."""
    asset_dir = 'images'
    return send_from_directory(asset_dir, file)
@app.route('/css/<file>')
def get_css(file):
    """Serve a static asset from the css/ folder."""
    asset_dir = 'css'
    return send_from_directory(asset_dir, file)
@app.route('/js/<file>')
def get_js(file):
    """Serve a static asset from the js/ folder."""
    asset_dir = 'js'
    return send_from_directory(asset_dir, file)
# Start the Flask development server when this module is run directly.
# debug=True enables the reloader and interactive debugger — development only.
if __name__ == '__main__':
    app.run(debug=True)
|
flexible
|
{
"blob_id": "af8a3fbce35685cd89dee72449a8be2a133b4a3f",
"index": 4684,
"step-1": "<mask token>\n\n\ndef get_data():\n class_list = []\n with open('counts.tsv') as fd:\n for line in fd.read().splitlines():\n class_data = line.split('\\t')\n class_list.append(class_data)\n return class_list\n\n\ndef get_fall_2016():\n directory = get_data()\n fall_2016_list = []\n for n in directory:\n if n[0] == '2016' and n[1] == 'fall':\n fall_2016_list.append(n)\n return fall_2016_list\n\n\ndef get_fall_2016_core(core):\n directory = get_fall_2016()\n core_satisfied_list = []\n for n in directory:\n core_possible = n[9].split(';')\n if core in core_possible:\n core_satisfied_list.append(n)\n return core_satisfied_list\n\n\n<mask token>\n\n\n@app.route('/CPAF')\ndef display_random_CPAF():\n courses = get_fall_2016_core('CPAF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAS')\ndef display_random_CPAS():\n courses = get_fall_2016_core('CPAS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n<mask token>\n\n\n@app.route('/CFAP')\ndef display_random_CFAP():\n courses = get_fall_2016_core('CFAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in 
range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n<mask token>\n\n\n@app.route('/CPLS')\ndef display_random_CPLS():\n courses = get_fall_2016_core('CPLS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLA')\ndef display_random_CPLA():\n courses = get_fall_2016_core('CPLA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPMS')\ndef display_random_CPMS():\n courses = get_fall_2016_core('CPMS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPPE')\ndef display_random_CPPE():\n courses = get_fall_2016_core('CPPE')\n list_of_three = []\n 
course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPRF')\ndef display_random_CPRF():\n courses = get_fall_2016_core('CPRF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUS')\ndef display_random_CPUS():\n courses = get_fall_2016_core('CPUS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUD')\ndef display_random_CPUD():\n courses = get_fall_2016_core('CPUD')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', 
courses=list_of_three)\n\n\n@app.route('/CUSP')\ndef display_random_CUSP():\n courses = get_fall_2016_core('CUSP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/images/<file>')\ndef get_image(file):\n return send_from_directory('images', file)\n\n\n@app.route('/css/<file>')\ndef get_css(file):\n return send_from_directory('css', file)\n\n\n@app.route('/js/<file>')\ndef get_js(file):\n return send_from_directory('js', file)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data():\n class_list = []\n with open('counts.tsv') as fd:\n for line in fd.read().splitlines():\n class_data = line.split('\\t')\n class_list.append(class_data)\n return class_list\n\n\ndef get_fall_2016():\n directory = get_data()\n fall_2016_list = []\n for n in directory:\n if n[0] == '2016' and n[1] == 'fall':\n fall_2016_list.append(n)\n return fall_2016_list\n\n\ndef get_fall_2016_core(core):\n directory = get_fall_2016()\n core_satisfied_list = []\n for n in directory:\n core_possible = n[9].split(';')\n if core in core_possible:\n core_satisfied_list.append(n)\n return core_satisfied_list\n\n\n<mask token>\n\n\n@app.route('/CPAF')\ndef display_random_CPAF():\n courses = get_fall_2016_core('CPAF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAS')\ndef display_random_CPAS():\n courses = get_fall_2016_core('CPAS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n<mask token>\n\n\n@app.route('/CFAP')\ndef display_random_CFAP():\n courses = get_fall_2016_core('CFAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in 
range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPGC')\ndef display_random_CPGC():\n courses = get_fall_2016_core('CPGC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPIC')\ndef display_random_CPIC():\n courses = get_fall_2016_core('CPIC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLS')\ndef display_random_CPLS():\n courses = get_fall_2016_core('CPLS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLA')\ndef display_random_CPLA():\n courses = get_fall_2016_core('CPLA')\n list_of_three = []\n course1 = 
random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPMS')\ndef display_random_CPMS():\n courses = get_fall_2016_core('CPMS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPPE')\ndef display_random_CPPE():\n courses = get_fall_2016_core('CPPE')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPRF')\ndef display_random_CPRF():\n courses = get_fall_2016_core('CPRF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', 
courses=list_of_three)\n\n\n@app.route('/CPUS')\ndef display_random_CPUS():\n courses = get_fall_2016_core('CPUS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUD')\ndef display_random_CPUD():\n courses = get_fall_2016_core('CPUD')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CUSP')\ndef display_random_CUSP():\n courses = get_fall_2016_core('CUSP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/images/<file>')\ndef get_image(file):\n return send_from_directory('images', file)\n\n\n@app.route('/css/<file>')\ndef get_css(file):\n return send_from_directory('css', file)\n\n\n@app.route('/js/<file>')\ndef get_js(file):\n return send_from_directory('js', file)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_data():\n class_list = []\n with open('counts.tsv') as fd:\n for line in fd.read().splitlines():\n class_data = line.split('\\t')\n class_list.append(class_data)\n return class_list\n\n\ndef get_fall_2016():\n directory = get_data()\n fall_2016_list = []\n for n in directory:\n if n[0] == '2016' and n[1] == 'fall':\n fall_2016_list.append(n)\n return fall_2016_list\n\n\ndef get_fall_2016_core(core):\n directory = get_fall_2016()\n core_satisfied_list = []\n for n in directory:\n core_possible = n[9].split(';')\n if core in core_possible:\n core_satisfied_list.append(n)\n return core_satisfied_list\n\n\n@app.route('/')\ndef display_full_courses():\n courses = get_data()\n return render_template('base.html', courses=courses)\n\n\n@app.route('/CPAF')\ndef display_random_CPAF():\n courses = get_fall_2016_core('CPAF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAS')\ndef display_random_CPAS():\n courses = get_fall_2016_core('CPAS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPEU')\ndef display_random_CPEU():\n courses = get_fall_2016_core('CPEU')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n 
course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPFA')\ndef display_random_CPFA():\n courses = get_fall_2016_core('CPFA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAP')\ndef display_random_CPAP():\n courses = get_fall_2016_core('CPAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CFAP')\ndef display_random_CFAP():\n courses = get_fall_2016_core('CFAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPGC')\ndef display_random_CPGC():\n 
courses = get_fall_2016_core('CPGC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPIC')\ndef display_random_CPIC():\n courses = get_fall_2016_core('CPIC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLS')\ndef display_random_CPLS():\n courses = get_fall_2016_core('CPLS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLA')\ndef display_random_CPLA():\n courses = get_fall_2016_core('CPLA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n 
return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPMS')\ndef display_random_CPMS():\n courses = get_fall_2016_core('CPMS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPPE')\ndef display_random_CPPE():\n courses = get_fall_2016_core('CPPE')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPRF')\ndef display_random_CPRF():\n courses = get_fall_2016_core('CPRF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUS')\ndef display_random_CPUS():\n courses = get_fall_2016_core('CPUS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != 
course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUD')\ndef display_random_CPUD():\n courses = get_fall_2016_core('CPUD')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CUSP')\ndef display_random_CUSP():\n courses = get_fall_2016_core('CUSP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/images/<file>')\ndef get_image(file):\n return send_from_directory('images', file)\n\n\n@app.route('/css/<file>')\ndef get_css(file):\n return send_from_directory('css', file)\n\n\n@app.route('/js/<file>')\ndef get_js(file):\n return send_from_directory('js', file)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from os import chdir\nfrom os.path import dirname, realpath\nimport random\nfrom flask import Flask, render_template, send_from_directory\napp = Flask(__name__)\n\n\ndef get_data():\n class_list = []\n with open('counts.tsv') as fd:\n for line in fd.read().splitlines():\n class_data = line.split('\\t')\n class_list.append(class_data)\n return class_list\n\n\ndef get_fall_2016():\n directory = get_data()\n fall_2016_list = []\n for n in directory:\n if n[0] == '2016' and n[1] == 'fall':\n fall_2016_list.append(n)\n return fall_2016_list\n\n\ndef get_fall_2016_core(core):\n directory = get_fall_2016()\n core_satisfied_list = []\n for n in directory:\n core_possible = n[9].split(';')\n if core in core_possible:\n core_satisfied_list.append(n)\n return core_satisfied_list\n\n\n@app.route('/')\ndef display_full_courses():\n courses = get_data()\n return render_template('base.html', courses=courses)\n\n\n@app.route('/CPAF')\ndef display_random_CPAF():\n courses = get_fall_2016_core('CPAF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAS')\ndef display_random_CPAS():\n courses = get_fall_2016_core('CPAS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', 
courses=list_of_three)\n\n\n@app.route('/CPEU')\ndef display_random_CPEU():\n courses = get_fall_2016_core('CPEU')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPFA')\ndef display_random_CPFA():\n courses = get_fall_2016_core('CPFA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPAP')\ndef display_random_CPAP():\n courses = get_fall_2016_core('CPAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CFAP')\ndef display_random_CFAP():\n courses = get_fall_2016_core('CFAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n 
list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPGC')\ndef display_random_CPGC():\n courses = get_fall_2016_core('CPGC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPIC')\ndef display_random_CPIC():\n courses = get_fall_2016_core('CPIC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLS')\ndef display_random_CPLS():\n courses = get_fall_2016_core('CPLS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPLA')\ndef display_random_CPLA():\n courses = get_fall_2016_core('CPLA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in 
range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPMS')\ndef display_random_CPMS():\n courses = get_fall_2016_core('CPMS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPPE')\ndef display_random_CPPE():\n courses = get_fall_2016_core('CPPE')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPRF')\ndef display_random_CPRF():\n courses = get_fall_2016_core('CPRF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUS')\ndef display_random_CPUS():\n courses = get_fall_2016_core('CPUS')\n list_of_three = []\n course1 = 
random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CPUD')\ndef display_random_CPUD():\n courses = get_fall_2016_core('CPUD')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/CUSP')\ndef display_random_CUSP():\n courses = get_fall_2016_core('CUSP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if course3[5] != course1[5] and course3[5] != course2[5]:\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n\n@app.route('/images/<file>')\ndef get_image(file):\n return send_from_directory('images', file)\n\n\n@app.route('/css/<file>')\ndef get_css(file):\n return send_from_directory('css', file)\n\n\n@app.route('/js/<file>')\ndef get_js(file):\n return send_from_directory('js', file)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from os import chdir\nfrom os.path import dirname, realpath\nimport random\n\nfrom flask import Flask, render_template, send_from_directory\n\napp = Flask(__name__)\n\n\n# gets list of list of all classes\ndef get_data():\n class_list = []\n with open('counts.tsv') as fd:\n for line in fd.read().splitlines():\n class_data = line.split(\"\\t\")\n class_list.append(class_data)\n return class_list\n\n\n# Gets list of list of all fall 2016 classes\ndef get_fall_2016():\n directory = get_data() # list of all classes of all years\n fall_2016_list = []\n for n in directory: # for any individual course,\n if n[0] == '2016' and n[1] == 'fall':\n fall_2016_list.append(n)\n return fall_2016_list\n\n\n# searches fall 2016 classes for a specific Core requirement and creates new list of courses that satisfy that core\ndef get_fall_2016_core(core):\n directory = get_fall_2016()\n core_satisfied_list = [] # list of all classes that satisfy specified core requirement\n for n in directory:\n core_possible = n[9].split(\";\") # splits multiple core requirements into list of the individual ones\n if core in core_possible: # if core argument is satisfied by the class, add class to list of classes\n core_satisfied_list.append(n)\n return core_satisfied_list\n\n\n@app.route('/')\ndef display_full_courses():\n courses = get_data()\n return render_template('base.html', courses=courses)\n\n# All of the remaining functions display website with 3 (different) random classes that satisfy specified core requirement\n\n@app.route('/CPAF')\ndef display_random_CPAF():\n courses = get_fall_2016_core('CPAF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10): # ensures second course is different from first one\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break # if course titles are different, keep course 2. 
if not, try again\n for i in range(10): # ensures third course is different from first and second\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break # if course titles are different, keep course 3. if not, try again\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPAS')\ndef display_random_CPAS():\n courses = get_fall_2016_core('CPAS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPEU')\ndef display_random_CPEU():\n courses = get_fall_2016_core('CPEU')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPFA')\ndef display_random_CPFA():\n courses = get_fall_2016_core('CPFA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return 
render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPAP')\ndef display_random_CPAP():\n courses = get_fall_2016_core('CPAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CFAP')\ndef display_random_CFAP():\n courses = get_fall_2016_core('CFAP')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPGC')\ndef display_random_CPGC():\n courses = get_fall_2016_core('CPGC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPIC')\ndef display_random_CPIC():\n courses = get_fall_2016_core('CPIC')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != 
course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPLS')\ndef display_random_CPLS():\n courses = get_fall_2016_core('CPLS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPLA')\ndef display_random_CPLA():\n courses = get_fall_2016_core('CPLA')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPMS')\ndef display_random_CPMS():\n courses = get_fall_2016_core('CPMS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPPE')\ndef display_random_CPPE():\n courses = get_fall_2016_core('CPPE')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != 
course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPRF')\ndef display_random_CPRF():\n courses = get_fall_2016_core('CPRF')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPUS')\ndef display_random_CPUS():\n courses = get_fall_2016_core('CPUS')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CPUD')\ndef display_random_CPUD():\n courses = get_fall_2016_core('CPUD')\n list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n@app.route('/CUSP')\ndef display_random_CUSP():\n courses = get_fall_2016_core('CUSP')\n 
list_of_three = []\n course1 = random.choice(courses)\n for i in range(10):\n course2 = random.choice(courses)\n if course2[5] != course1[5]:\n break\n for i in range(10):\n course3 = random.choice(courses)\n if (course3[5] != course1[5]) and (course3[5] != course2[5]):\n break\n list_of_three.append(course1)\n list_of_three.append(course2)\n list_of_three.append(course3)\n return render_template('courses.html', courses=list_of_three)\n\n# The functions below lets you access files in the css, js, and images folders.\n# You should not change them unless you know what you are doing.\n\n@app.route('/images/<file>')\ndef get_image(file):\n return send_from_directory('images', file)\n\n@app.route('/css/<file>')\ndef get_css(file):\n return send_from_directory('css', file)\n\n@app.route('/js/<file>')\ndef get_js(file):\n return send_from_directory('js', file)\n\nif __name__ == '__main__':\n app.run(debug=True)",
"step-ids": [
17,
19,
24,
26,
27
]
}
|
[
17,
19,
24,
26,
27
] |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy import create_engine, MetaData, Table
class DoubanPipeline(object):
    """Scrapy item pipeline that persists scraped film items into PostgreSQL.

    A single SQLAlchemy connection is opened when the spider starts and
    released when it finishes; each item becomes one INSERT into ``film``.
    """

    conn = None        # SQLAlchemy Connection, created in open_spider
    film_table = None  # reflected 'film' table, created in open_spider

    def open_spider(self, spider):
        """Connect to the database and reflect the target table once per run."""
        # NOTE(review): credentials are hard-coded in the DSN -- they should
        # come from scrapy settings / environment. Left unchanged here.
        engine = create_engine('postgresql+psycopg2://postgres:orchid@127.0.0.1:5432/postgres', echo=False)
        self.conn = engine.connect()
        metadata = MetaData(engine)
        self.film_table = Table('film', metadata, autoload=True)

    def process_item(self, item, spider):
        """Insert *item* into the film table and pass it on unchanged.

        Insert failures (e.g. duplicate primary key) are deliberately
        swallowed so one bad row does not abort the whole crawl.
        """
        ins = self.film_table.insert().values(item)
        try:
            self.conn.execute(ins)
        except Exception:  # was `except Exception, e:` -- Python-2-only syntax, `e` unused
            pass
        return item

    def close_spider(self, spider):
        """Release the database connection at the end of the crawl."""
        self.conn.close()
|
normal
|
{
"blob_id": "5ef6b2ff89ee1667ddb01b1936557f1f11a49910",
"index": 4673,
"step-1": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom sqlalchemy import create_engine, MetaData, Table\n\n\nclass DoubanPipeline(object):\n conn = None\n film_table = None\n\n def open_spider(self, spider):\n engine = create_engine('postgresql+psycopg2://postgres:orchid@127.0.0.1:5432/postgres', echo=False)\n self.conn = engine.connect()\n metadata = MetaData(engine)\n self.film_table = Table('film', metadata, autoload=True)\n\n def process_item(self, item, spider):\n ins = self.film_table.insert().values(item)\n try:\n self.conn.execute(ins)\n except Exception, e:\n pass\n\n return item\n\n def close_spider(self, spider):\n self.conn.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def __run_query(self, query):
URL = 'https://api.github.com/graphql'
request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',
'Aleister93'))
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed to run by returning code of {}. {}'.
format(request.status_code, query))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def __run_query(self, query):
URL = 'https://api.github.com/graphql'
request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',
'Aleister93'))
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed to run by returning code of {}. {}'.
format(request.status_code, query))
<|reserved_special_token_0|>
def repos_for_query(self, query):
query2 = """
query queryByItems($queryString: String!){
search(query:$queryString, type:REPOSITORY, first: 100){
nodes{
... on Repository{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
"""
json = {'query': query2, 'variables': {'queryString': query}}
return __run_query(self, json)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def __run_query(self, query):
URL = 'https://api.github.com/graphql'
request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',
'Aleister93'))
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed to run by returning code of {}. {}'.
format(request.status_code, query))
def user_get_starred(self, username):
query = """
query userGetStarred($username: String!){
user(login: $username){
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
following(first:100){
nodes{
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
}
}
"""
json = {'query': query, 'variables': {'username': username}}
return __run_query(self, json)
def repos_for_query(self, query):
query2 = """
query queryByItems($queryString: String!){
search(query:$queryString, type:REPOSITORY, first: 100){
nodes{
... on Repository{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
"""
json = {'query': query2, 'variables': {'queryString': query}}
return __run_query(self, json)
<|reserved_special_token_1|>
import requests
from requests.auth import HTTPBasicAuth
def __run_query(self, query):
URL = 'https://api.github.com/graphql'
request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',
'Aleister93'))
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed to run by returning code of {}. {}'.
format(request.status_code, query))
def user_get_starred(self, username):
query = """
query userGetStarred($username: String!){
user(login: $username){
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
following(first:100){
nodes{
starredRepositories(first:100){
nodes{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
}
}
"""
json = {'query': query, 'variables': {'username': username}}
return __run_query(self, json)
def repos_for_query(self, query):
query2 = """
query queryByItems($queryString: String!){
search(query:$queryString, type:REPOSITORY, first: 100){
nodes{
... on Repository{
nameWithOwner
description
stargazers{
totalCount
}
}
}
}
}
"""
json = {'query': query2, 'variables': {'queryString': query}}
return __run_query(self, json)
<|reserved_special_token_1|>
import requests
from requests.auth import HTTPBasicAuth
def __run_query(self, query):
    """POST the GraphQL *query* payload to the GitHub v4 API and return the
    decoded JSON body.

    Raises Exception when the HTTP status is anything other than 200.
    NOTE(review): credentials are hard-coded in source -- they should be
    moved to configuration/environment. Left untouched here.
    """
    endpoint = 'https://api.github.com/graphql'
    credentials = HTTPBasicAuth('gleisonbt', 'Aleister93')
    response = requests.post(endpoint, json=query, auth=credentials)
    if response.status_code != 200:
        raise Exception("Query failed to run by returning code of {}. {}".format(response.status_code, query))
    return response.json()
def user_get_starred(self, username):
    """Return the GraphQL response listing the repositories starred by
    *username* and by every account *username* follows (first 100 of each).
    """
    query = """
    query userGetStarred($username: String!){
        user(login: $username){
            starredRepositories(first:100){
            nodes{
                nameWithOwner
                description
                stargazers{
                totalCount
                }
            }
            }
            following(first:100){
            nodes{
                starredRepositories(first:100){
                nodes{
                    nameWithOwner
                    description
                    stargazers{
                    totalCount
                    }
                }
                }
            }
            }
        }
    }
    """
    variables = {'username': username}
    payload = {'query': query, 'variables': variables}
    return __run_query(self, payload)
def repos_for_query(self, query):
    """Return the GraphQL search response for repositories matching the
    search string *query* (first 100 results).
    """
    document = """
    query queryByItems($queryString: String!){
        search(query:$queryString, type:REPOSITORY, first: 100){
            nodes{
                ... on Repository{
                nameWithOwner
                description
                stargazers{
                    totalCount
                }
                }
            }
        }
    }
    """
    variables = {'queryString': query}
    payload = {'query': document, 'variables': variables}
    return __run_query(self, payload)
|
flexible
|
{
"blob_id": "fa511411e59880fd80fba0ccc49c95d42cb4b78d",
"index": 6962,
"step-1": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\n<mask token>\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-3": "<mask token>\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-4": "import requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n request = requests.post(URL, json=query, auth=HTTPBasicAuth('gleisonbt',\n 'Aleister93'))\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception('Query failed to run by returning code of {}. {}'.\n format(request.status_code, query))\n\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query, 'variables': {'username': username}}\n return __run_query(self, json)\n\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n json = {'query': query2, 'variables': {'queryString': query}}\n return __run_query(self, json)\n",
"step-5": "import requests\nfrom requests.auth import HTTPBasicAuth\n\ndef __run_query(self, query):\n URL = 'https://api.github.com/graphql'\n\n request = requests.post(URL, json=query,auth=HTTPBasicAuth('gleisonbt', 'Aleister93'))\n\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query failed to run by returning code of {}. {}\".format(request.status_code, query))\n\ndef user_get_starred(self, username):\n query = \"\"\"\n query userGetStarred($username: String!){\n user(login: $username){\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n following(first:100){\n nodes{\n starredRepositories(first:100){\n nodes{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query, \"variables\":{\n \"username\": username\n }\n }\n\n return __run_query(self, json)\n\ndef repos_for_query(self, query):\n query2 = \"\"\"\n query queryByItems($queryString: String!){\n search(query:$queryString, type:REPOSITORY, first: 100){\n nodes{\n ... on Repository{\n nameWithOwner\n description\n stargazers{\n totalCount\n }\n }\n }\n }\n }\n \"\"\"\n\n json = {\n \"query\": query2, \"variables\":{\n \"queryString\": query\n }\n }\n\n return __run_query(self, json)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
host, port = "localhost", 9999  # TCP endpoint the control channel listens on
import os
import sys
import signal
import socket
import time
import select
from SocketServer import TCPServer
from SocketServer import StreamRequestHandler
class TimeoutException(Exception):
	"""Raised by the SIGALRM handler when reading a client command exceeds 1s."""
	pass
def read_command(rfile,wfile,prompt):
	"""Read one command line from the client, waiting at most one second.

	When *prompt* is true, a newline plus '> ' prompt is written to *wfile*
	first. Returns the stripped line, or '' if SIGALRM fired before a full
	line arrived.
	"""
	def on_alarm(signum, frame):
		raise TimeoutException()
	# Arm a one-second alarm so a silent client cannot block us forever.
	signal.signal(signal.SIGALRM, on_alarm)
	signal.alarm(1)
	line = ''
	try:
		if prompt:
			wfile.write('\n> ')
		line = rfile.readline()
	except TimeoutException:
		line = ''
	finally:
		# Always cancel the pending alarm, whether we read or timed out.
		signal.alarm(0)
	return line.strip()
class Control (StreamRequestHandler):
	"""Telnet-style control session: reads commands from the TCP client,
	relays them to exabgp via stdout, and echoes exabgp's stdin replies
	back to the client."""
	allow_reuse_address = True
	def handle(self):
		"""Serve one client connection until it sends 'quit' or 'exit'."""
		command = 'go'
		prompt = True
		while command not in ['quit','exit']:
			# reading the command on TCP
			# relaying it to exabgp via the socket
			command = read_command(self.rfile,self.wfile,prompt)
			prompt = False
			if command in ['quit','exit']:
				continue
			if command in ['help','?']:
				self.wfile.write('exabgp tcp-control help\n')
				self.wfile.write('\n')
				self.wfile.write('This program is just a way to manually enter commands using telnet\n')
				self.wfile.write('routes and flows syntax are parsed like normal configuration\n')
				self.wfile.write('\n')
				self.wfile.write('quit (close the telnet connection)\n')
				self.wfile.write('exit (close the telnet connection)\n')
				self.wfile.write('\n')
				self.wfile.write('version (returns the version of exabgp)\n')
				self.wfile.write('reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n')
				self.wfile.write('restart (reload the configuration and bounce all BGP session)\n')
				self.wfile.write('shutdown (politely terminate all session and exit)\n')
				self.wfile.write('\n')
				self.wfile.write('WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\n')
				self.wfile.write('WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n')
				self.wfile.write('\n')
				self.wfile.write('The route will be sent to ALL the peers (there is no way to filter the announcement yet)\n')
				self.wfile.write('\n')
				self.wfile.write('annouce route\n')
				self.wfile.write('    The multi-line syntax is currently not supported\n')
				self.wfile.write('    example: announce route 1.2.3.4 next-hop 5.6.7.8\n')
				self.wfile.write('withdraw route\n')
				self.wfile.write('    example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n')
				self.wfile.write('announce flow\n')
				self.wfile.write('    exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\n\n')
				self.wfile.write('    example: announce flow route {\\n match {\\n source 10.0.0.1/32;\\n destination 1.2.3.4/32;\\n }\\n then {\\n discard;\\n }\\n }\\n\n')
				self.wfile.write('withdraw flow\n')
				self.wfile.write('    exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\n\n')
				self.wfile.write('    example: withdraw flow route {\\n match {\\n source 10.0.0.1/32;\\n destination 1.2.3.4/32;\\n }\\n then {\\n discard;\\n }\\n }\\n\n')
				self.wfile.write('\n')
				self.wfile.write('SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n')
				self.wfile.write('AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n')
				self.wfile.write('\n')
				self.wfile.write('show neighbors\n')
				self.wfile.write('    display the neighbor configured\\n\n')
				self.wfile.write('show routes\n')
				self.wfile.write('    display routes which have been announced\\n\n')
				self.wfile.write('\n')
				self.wfile.flush()
				prompt = True
			elif command.startswith('announce '):
				# Forward the command to exabgp, which reads this process's stdout.
				sys.stdout.write('%s\n' % command)
				sys.stdout.flush()
				self.wfile.write('requested %s annoucement\n' % command.split(' ')[1])
				self.wfile.flush()
				prompt = True
			elif command.startswith('withdraw '):
				sys.stdout.write('%s\n' % command)
				sys.stdout.flush()
				self.wfile.write('request %s withdrawal\n' % command.split(' ')[1])
				self.wfile.flush()
				prompt = True
			elif command.startswith('neighbor '):
				sys.stdout.write('%s\n' % command)
				sys.stdout.flush()
				self.wfile.write('neighbor %s requested\n' % command.split(' ')[1])
				self.wfile.flush()
				prompt = True
			elif command.startswith('show '):
				sys.stdout.write('%s\n' % command)
				sys.stdout.flush()
				self.wfile.write('%s requested\n' % command.split(' ')[1])
				self.wfile.flush()
				prompt = True
			elif command in ['shutdown','reload','restart','version']:
				sys.stdout.write('%s\n' % command)
				sys.stdout.flush()
				prompt = True
			elif command not in ['go','']:
				self.wfile.write('unknown command [%s], try: help\n' % command)
				self.wfile.flush()
				prompt = True
			# Poll exabgp's reply on our stdin and relay everything to the client.
			try:
				r,_,_ = select.select([sys.stdin], [], [], 1.0)
			except select.error:
				raise KeyboardInterrupt('SIGNAL received in select')
			if r:
				self.wfile.write('\n')
			while r:
				# Can not use readline with select.
				# From http://stackoverflow.com/questions/5486717/python-select-doesnt-signal-all-input-from-pipe
				# Note that internally file.readlines([size]) loops and invokes the read() syscall more than once, attempting to fill an internal buffer of size. The first call to read() will immediately return, since select() indicated the fd was readable. However the 2nd call will block until data is available, which defeats the purpose of using select. In any case it is tricky to use file.readlines([size]) in an asynchronous app.
				response = os.read(sys.stdin.fileno(),4096)
				# this should not happen as select informed us of data to read but it seems it does
				if not response:
					break
				self.wfile.write(response)
				prompt = True
				time.sleep(0.1)
				try:
					r,_,_ = select.select([sys.stdin], [], [], 1.0)
				except select.error:
					raise KeyboardInterrupt('SIGNAL received in select')
			continue
def timed(message):
	"""Format *message* as an exabgp-style log line.

	Layout: '<timestamp> | FORKED   | <pid> | tcp-server | <message>'.
	"""
	stamp = time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())
	fields = (stamp, 'FORKED', os.getpid(), 'tcp-server', message)
	return "%s | %-8s | %-6d | %-13s | %s" % fields
def sig (signum, frame):
	"""Signal handler: terminate this process immediately with SIGKILL."""
	# outch rude but prevent silly trace on exit if waiting for a read on stdin :p
	os.kill(os.getpid(),signal.SIGKILL)
# Install the hard-kill handler so INT/TERM do not leave a traceback while
# the process is blocked reading stdin.
signal.signal(signal.SIGINT, sig)
signal.signal(signal.SIGTERM, sig)
# Bind-retry state: attempts so far, and whether bind() has succeeded yet.
count = 0
connected = False
class Server (TCPServer):
	"""TCPServer that logs to stderr once it starts listening."""
	def server_activate (self):
		# Announce the listening endpoint before accepting connections.
		print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,port))
		sys.stderr.flush()
		TCPServer.server_activate(self)
while not connected:
try:
server = Server((host, port), Control)
connected = True
except socket.error:
count += 1
if count % 1 == 0:
print >> sys.stderr, timed('tcp-server still trying to bind to %s:%d' % (host,port))
# we can not connect to the socket, retrying (happens if respawns too quickly)
time.sleep(1)
server.serve_forever()
|
normal
|
{
"blob_id": "e00b81f73f4f639e008fde1a6b2d4f7937df4207",
"index": 8518,
"step-1": "<mask token>\n\n\nclass TimeoutException(Exception):\n pass\n\n\n<mask token>\n\n\nclass Control(StreamRequestHandler):\n allow_reuse_address = True\n\n def handle(self):\n command = 'go'\n prompt = True\n while command not in ['quit', 'exit']:\n command = read_command(self.rfile, self.wfile, prompt)\n prompt = False\n if command in ['quit', 'exit']:\n continue\n if command in ['help', '?']:\n self.wfile.write('exabgp tcp-control help\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"This program is just a way to manually enter commands using telnet\n\"\"\"\n )\n self.wfile.write(\n 'routes and flows syntax are parsed like normal configuration\\n'\n )\n self.wfile.write('\\n')\n self.wfile.write('quit (close the telnet connection)\\n')\n self.wfile.write('exit (close the telnet connection)\\n')\n self.wfile.write('\\n')\n self.wfile.write('version (returns the version of exabgp)\\n')\n self.wfile.write(\n \"\"\"reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n\"\"\"\n )\n self.wfile.write(\n 'restart (reload the configuration and bounce all BGP session)\\n'\n )\n self.wfile.write(\n 'shutdown (politely terminate all session and exit)\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\n\"\"\"\n )\n self.wfile.write(\n \"\"\"WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"The route will be sent to ALL the peers (there is no way to filter the announcement yet)\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('annouce route\\n')\n self.wfile.write(\n ' The multi-line syntax is currently not supported\\n')\n self.wfile.write(\n ' example: announce route 1.2.3.4 next-hop 5.6.7.8\\n')\n self.wfile.write('withdraw 
route\\n')\n self.wfile.write(\n \"\"\" example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n\"\"\"\n )\n self.wfile.write('announce flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: announce flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('withdraw flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: withdraw flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n\"\"\"\n )\n self.wfile.write(\n \"\"\"AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('show neighbors\\n')\n self.wfile.write(' display the neighbor configured\\\\n\\n')\n self.wfile.write('show routes\\n')\n self.wfile.write(\n ' display routes which have been announced\\\\n\\n')\n self.wfile.write('\\n')\n self.wfile.flush()\n prompt = True\n elif command.startswith('announce '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('requested %s annoucement\\n' % command.\n split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('withdraw '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('request %s withdrawal\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('neighbor '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n 
self.wfile.write('neighbor %s requested\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('show '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('%s requested\\n' % command.split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif command in ['shutdown', 'reload', 'restart', 'version']:\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n prompt = True\n elif command not in ['go', '']:\n self.wfile.write('unknown command [%s], try: help\\n' % command)\n self.wfile.flush()\n prompt = True\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n if r:\n self.wfile.write('\\n')\n while r:\n response = os.read(sys.stdin.fileno(), 4096)\n if not response:\n break\n self.wfile.write(response)\n prompt = True\n time.sleep(0.1)\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n continue\n\n\n<mask token>\n\n\nclass Server(TCPServer):\n\n def server_activate(self):\n print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,\n port))\n sys.stderr.flush()\n TCPServer.server_activate(self)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TimeoutException(Exception):\n pass\n\n\ndef read_command(rfile, wfile, prompt):\n\n def timeout_handler(signum, frame):\n raise TimeoutException()\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(1)\n try:\n if prompt:\n wfile.write('\\n> ')\n c = rfile.readline()\n except TimeoutException:\n c = ''\n finally:\n signal.alarm(0)\n return c.strip()\n\n\nclass Control(StreamRequestHandler):\n allow_reuse_address = True\n\n def handle(self):\n command = 'go'\n prompt = True\n while command not in ['quit', 'exit']:\n command = read_command(self.rfile, self.wfile, prompt)\n prompt = False\n if command in ['quit', 'exit']:\n continue\n if command in ['help', '?']:\n self.wfile.write('exabgp tcp-control help\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"This program is just a way to manually enter commands using telnet\n\"\"\"\n )\n self.wfile.write(\n 'routes and flows syntax are parsed like normal configuration\\n'\n )\n self.wfile.write('\\n')\n self.wfile.write('quit (close the telnet connection)\\n')\n self.wfile.write('exit (close the telnet connection)\\n')\n self.wfile.write('\\n')\n self.wfile.write('version (returns the version of exabgp)\\n')\n self.wfile.write(\n \"\"\"reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n\"\"\"\n )\n self.wfile.write(\n 'restart (reload the configuration and bounce all BGP session)\\n'\n )\n self.wfile.write(\n 'shutdown (politely terminate all session and exit)\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\n\"\"\"\n )\n self.wfile.write(\n \"\"\"WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"The route will be sent to ALL the peers (there is 
no way to filter the announcement yet)\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('annouce route\\n')\n self.wfile.write(\n ' The multi-line syntax is currently not supported\\n')\n self.wfile.write(\n ' example: announce route 1.2.3.4 next-hop 5.6.7.8\\n')\n self.wfile.write('withdraw route\\n')\n self.wfile.write(\n \"\"\" example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n\"\"\"\n )\n self.wfile.write('announce flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: announce flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('withdraw flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: withdraw flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n\"\"\"\n )\n self.wfile.write(\n \"\"\"AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('show neighbors\\n')\n self.wfile.write(' display the neighbor configured\\\\n\\n')\n self.wfile.write('show routes\\n')\n self.wfile.write(\n ' display routes which have been announced\\\\n\\n')\n self.wfile.write('\\n')\n self.wfile.flush()\n prompt = True\n elif command.startswith('announce '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('requested %s annoucement\\n' % command.\n split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif 
command.startswith('withdraw '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('request %s withdrawal\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('neighbor '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('neighbor %s requested\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('show '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('%s requested\\n' % command.split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif command in ['shutdown', 'reload', 'restart', 'version']:\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n prompt = True\n elif command not in ['go', '']:\n self.wfile.write('unknown command [%s], try: help\\n' % command)\n self.wfile.flush()\n prompt = True\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n if r:\n self.wfile.write('\\n')\n while r:\n response = os.read(sys.stdin.fileno(), 4096)\n if not response:\n break\n self.wfile.write(response)\n prompt = True\n time.sleep(0.1)\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n continue\n\n\ndef timed(message):\n now = time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())\n return '%s | %-8s | %-6d | %-13s | %s' % (now, 'FORKED', os.getpid(),\n 'tcp-server', message)\n\n\ndef sig(signum, frame):\n os.kill(os.getpid(), signal.SIGKILL)\n\n\n<mask token>\n\n\nclass Server(TCPServer):\n\n def server_activate(self):\n print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,\n port))\n sys.stderr.flush()\n TCPServer.server_activate(self)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TimeoutException(Exception):\n pass\n\n\ndef read_command(rfile, wfile, prompt):\n\n def timeout_handler(signum, frame):\n raise TimeoutException()\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(1)\n try:\n if prompt:\n wfile.write('\\n> ')\n c = rfile.readline()\n except TimeoutException:\n c = ''\n finally:\n signal.alarm(0)\n return c.strip()\n\n\nclass Control(StreamRequestHandler):\n allow_reuse_address = True\n\n def handle(self):\n command = 'go'\n prompt = True\n while command not in ['quit', 'exit']:\n command = read_command(self.rfile, self.wfile, prompt)\n prompt = False\n if command in ['quit', 'exit']:\n continue\n if command in ['help', '?']:\n self.wfile.write('exabgp tcp-control help\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"This program is just a way to manually enter commands using telnet\n\"\"\"\n )\n self.wfile.write(\n 'routes and flows syntax are parsed like normal configuration\\n'\n )\n self.wfile.write('\\n')\n self.wfile.write('quit (close the telnet connection)\\n')\n self.wfile.write('exit (close the telnet connection)\\n')\n self.wfile.write('\\n')\n self.wfile.write('version (returns the version of exabgp)\\n')\n self.wfile.write(\n \"\"\"reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n\"\"\"\n )\n self.wfile.write(\n 'restart (reload the configuration and bounce all BGP session)\\n'\n )\n self.wfile.write(\n 'shutdown (politely terminate all session and exit)\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\n\"\"\"\n )\n self.wfile.write(\n \"\"\"WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"The route will be sent to ALL the peers (there is 
no way to filter the announcement yet)\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('annouce route\\n')\n self.wfile.write(\n ' The multi-line syntax is currently not supported\\n')\n self.wfile.write(\n ' example: announce route 1.2.3.4 next-hop 5.6.7.8\\n')\n self.wfile.write('withdraw route\\n')\n self.wfile.write(\n \"\"\" example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n\"\"\"\n )\n self.wfile.write('announce flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: announce flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('withdraw flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: withdraw flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n\"\"\"\n )\n self.wfile.write(\n \"\"\"AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('show neighbors\\n')\n self.wfile.write(' display the neighbor configured\\\\n\\n')\n self.wfile.write('show routes\\n')\n self.wfile.write(\n ' display routes which have been announced\\\\n\\n')\n self.wfile.write('\\n')\n self.wfile.flush()\n prompt = True\n elif command.startswith('announce '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('requested %s annoucement\\n' % command.\n split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif 
command.startswith('withdraw '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('request %s withdrawal\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('neighbor '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('neighbor %s requested\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('show '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('%s requested\\n' % command.split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif command in ['shutdown', 'reload', 'restart', 'version']:\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n prompt = True\n elif command not in ['go', '']:\n self.wfile.write('unknown command [%s], try: help\\n' % command)\n self.wfile.flush()\n prompt = True\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n if r:\n self.wfile.write('\\n')\n while r:\n response = os.read(sys.stdin.fileno(), 4096)\n if not response:\n break\n self.wfile.write(response)\n prompt = True\n time.sleep(0.1)\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n continue\n\n\ndef timed(message):\n now = time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())\n return '%s | %-8s | %-6d | %-13s | %s' % (now, 'FORKED', os.getpid(),\n 'tcp-server', message)\n\n\ndef sig(signum, frame):\n os.kill(os.getpid(), signal.SIGKILL)\n\n\nsignal.signal(signal.SIGINT, sig)\nsignal.signal(signal.SIGTERM, sig)\n<mask token>\n\n\nclass Server(TCPServer):\n\n def server_activate(self):\n print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,\n port))\n sys.stderr.flush()\n TCPServer.server_activate(self)\n\n\nwhile not connected:\n try:\n server = Server((host, port), Control)\n connected = True\n except 
socket.error:\n count += 1\n if count % 1 == 0:\n print >> sys.stderr, timed(\n 'tcp-server still trying to bind to %s:%d' % (host, port))\n time.sleep(1)\nserver.serve_forever()\n",
"step-4": "host, port = 'localhost', 9999\n<mask token>\n\n\nclass TimeoutException(Exception):\n pass\n\n\ndef read_command(rfile, wfile, prompt):\n\n def timeout_handler(signum, frame):\n raise TimeoutException()\n signal.signal(signal.SIGALRM, timeout_handler)\n signal.alarm(1)\n try:\n if prompt:\n wfile.write('\\n> ')\n c = rfile.readline()\n except TimeoutException:\n c = ''\n finally:\n signal.alarm(0)\n return c.strip()\n\n\nclass Control(StreamRequestHandler):\n allow_reuse_address = True\n\n def handle(self):\n command = 'go'\n prompt = True\n while command not in ['quit', 'exit']:\n command = read_command(self.rfile, self.wfile, prompt)\n prompt = False\n if command in ['quit', 'exit']:\n continue\n if command in ['help', '?']:\n self.wfile.write('exabgp tcp-control help\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"This program is just a way to manually enter commands using telnet\n\"\"\"\n )\n self.wfile.write(\n 'routes and flows syntax are parsed like normal configuration\\n'\n )\n self.wfile.write('\\n')\n self.wfile.write('quit (close the telnet connection)\\n')\n self.wfile.write('exit (close the telnet connection)\\n')\n self.wfile.write('\\n')\n self.wfile.write('version (returns the version of exabgp)\\n')\n self.wfile.write(\n \"\"\"reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\n\"\"\"\n )\n self.wfile.write(\n 'restart (reload the configuration and bounce all BGP session)\\n'\n )\n self.wfile.write(\n 'shutdown (politely terminate all session and exit)\\n')\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\n\"\"\"\n )\n self.wfile.write(\n \"\"\"WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"The route will be 
sent to ALL the peers (there is no way to filter the announcement yet)\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('annouce route\\n')\n self.wfile.write(\n ' The multi-line syntax is currently not supported\\n')\n self.wfile.write(\n ' example: announce route 1.2.3.4 next-hop 5.6.7.8\\n')\n self.wfile.write('withdraw route\\n')\n self.wfile.write(\n \"\"\" example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\n\"\"\"\n )\n self.wfile.write('announce flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: announce flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('withdraw flow\\n')\n self.wfile.write(\n \"\"\" exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\n\"\"\"\n )\n self.wfile.write(\n \"\"\" example: withdraw flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write(\n \"\"\"SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\n\"\"\"\n )\n self.wfile.write(\n \"\"\"AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\n\"\"\"\n )\n self.wfile.write('\\n')\n self.wfile.write('show neighbors\\n')\n self.wfile.write(' display the neighbor configured\\\\n\\n')\n self.wfile.write('show routes\\n')\n self.wfile.write(\n ' display routes which have been announced\\\\n\\n')\n self.wfile.write('\\n')\n self.wfile.flush()\n prompt = True\n elif command.startswith('announce '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('requested %s annoucement\\n' % command.\n split(' ')[1])\n self.wfile.flush()\n 
prompt = True\n elif command.startswith('withdraw '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('request %s withdrawal\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('neighbor '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('neighbor %s requested\\n' % command.split(\n ' ')[1])\n self.wfile.flush()\n prompt = True\n elif command.startswith('show '):\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n self.wfile.write('%s requested\\n' % command.split(' ')[1])\n self.wfile.flush()\n prompt = True\n elif command in ['shutdown', 'reload', 'restart', 'version']:\n sys.stdout.write('%s\\n' % command)\n sys.stdout.flush()\n prompt = True\n elif command not in ['go', '']:\n self.wfile.write('unknown command [%s], try: help\\n' % command)\n self.wfile.flush()\n prompt = True\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n if r:\n self.wfile.write('\\n')\n while r:\n response = os.read(sys.stdin.fileno(), 4096)\n if not response:\n break\n self.wfile.write(response)\n prompt = True\n time.sleep(0.1)\n try:\n r, _, _ = select.select([sys.stdin], [], [], 1.0)\n except select.error:\n raise KeyboardInterrupt('SIGNAL received in select')\n continue\n\n\ndef timed(message):\n now = time.strftime('%a, %d %b %Y %H:%M:%S', time.localtime())\n return '%s | %-8s | %-6d | %-13s | %s' % (now, 'FORKED', os.getpid(),\n 'tcp-server', message)\n\n\ndef sig(signum, frame):\n os.kill(os.getpid(), signal.SIGKILL)\n\n\nsignal.signal(signal.SIGINT, sig)\nsignal.signal(signal.SIGTERM, sig)\ncount = 0\nconnected = False\n\n\nclass Server(TCPServer):\n\n def server_activate(self):\n print >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,\n port))\n sys.stderr.flush()\n TCPServer.server_activate(self)\n\n\nwhile not connected:\n try:\n server = Server((host, port), 
Control)\n connected = True\n except socket.error:\n count += 1\n if count % 1 == 0:\n print >> sys.stderr, timed(\n 'tcp-server still trying to bind to %s:%d' % (host, port))\n time.sleep(1)\nserver.serve_forever()\n",
"step-5": "#!/usr/bin/env python\n\nhost, port = \"localhost\", 9999\n\nimport os\nimport sys\nimport signal\nimport socket\nimport time\nimport select\n\nfrom SocketServer import TCPServer\nfrom SocketServer import StreamRequestHandler\n\nclass TimeoutException(Exception):\n\tpass\n\ndef read_command(rfile,wfile,prompt):\n\tdef timeout_handler(signum, frame):\n\t\traise TimeoutException()\n\n\tsignal.signal(signal.SIGALRM, timeout_handler)\n\tsignal.alarm(1)\n\n\ttry:\n\t\tif prompt:\n\t\t\twfile.write('\\n> ')\n\t\tc = rfile.readline()\n\texcept TimeoutException:\n\t\tc = ''\n\tfinally:\n\t\tsignal.alarm(0)\n\n\treturn c.strip()\n\nclass Control (StreamRequestHandler):\n\tallow_reuse_address = True\n\n\tdef handle(self):\n\t\tcommand = 'go'\n\t\tprompt = True\n\t\twhile command not in ['quit','exit']:\n\t\t\t# reading the command on TCP\n\t\t\t# relaying it to exabgp via the socket\n\t\t\tcommand = read_command(self.rfile,self.wfile,prompt)\n\t\t\tprompt = False\n\n\t\t\tif command in ['quit','exit']:\n\t\t\t\tcontinue\n\n\t\t\tif command in ['help','?']:\n\t\t\t\tself.wfile.write('exabgp tcp-control help\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('This program is just a way to manually enter commands using telnet\\n')\n\t\t\t\tself.wfile.write('routes and flows syntax are parsed like normal configuration\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('quit (close the telnet connection)\\n')\n\t\t\t\tself.wfile.write('exit (close the telnet connection)\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('version (returns the version of exabgp)\\n')\n\t\t\t\tself.wfile.write('reload (reload the configuration - cause exabgp to forget all routes learned via external processes)\\n')\n\t\t\t\tself.wfile.write('restart (reload the configuration and bounce all BGP session)\\n')\n\t\t\t\tself.wfile.write('shutdown (politely terminate all session and 
exit)\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('WARNING : The result of the following commands will depend on the route, it could even cause the BGP session to drop)\\n')\n\t\t\t\tself.wfile.write('WARNING : It could even cause the BGP session to drop, for example if you send flow routes to a router which does not support it\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('The route will be sent to ALL the peers (there is no way to filter the announcement yet)\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('annouce route\\n')\n\t\t\t\tself.wfile.write(' The multi-line syntax is currently not supported\\n')\n\t\t\t\tself.wfile.write(' example: announce route 1.2.3.4 next-hop 5.6.7.8\\n')\n\t\t\t\tself.wfile.write('withdraw route\\n')\n\t\t\t\tself.wfile.write(' example: withdraw route (example: withdraw route 1.2.3.4 next-hop 5.6.7.8)\\n')\n\t\t\t\tself.wfile.write('announce flow\\n')\n\t\t\t\tself.wfile.write(' exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\\n')\n\t\t\t\tself.wfile.write(' example: announce flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\\n')\n\t\t\t\tself.wfile.write('withdraw flow\\n')\n\t\t\t\tself.wfile.write(' exabgp does not have a single line flow syntax so you must use the multiline version indicating newlines with \\\\n\\n')\n\t\t\t\tself.wfile.write(' example: withdraw flow route {\\\\n match {\\\\n source 10.0.0.1/32;\\\\n destination 1.2.3.4/32;\\\\n }\\\\n then {\\\\n discard;\\\\n }\\\\n }\\\\n\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('SHOW COMMANDS SHOULD NOT BE USED IN PRODUCTION AS THEY HALT THE BGP ROUTE PROCESSING\\n')\n\t\t\t\tself.wfile.write('AND CAN RESULT IN BGP PEERING SESSION DROPPING - You have been warned\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.write('show 
neighbors\\n')\n\t\t\t\tself.wfile.write(' display the neighbor configured\\\\n\\n')\n\t\t\t\tself.wfile.write('show routes\\n')\n\t\t\t\tself.wfile.write(' display routes which have been announced\\\\n\\n')\n\t\t\t\tself.wfile.write('\\n')\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command.startswith('announce '):\n\t\t\t\tsys.stdout.write('%s\\n' % command)\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tself.wfile.write('requested %s annoucement\\n' % command.split(' ')[1])\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command.startswith('withdraw '):\n\t\t\t\tsys.stdout.write('%s\\n' % command)\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tself.wfile.write('request %s withdrawal\\n' % command.split(' ')[1])\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command.startswith('neighbor '):\n\t\t\t\tsys.stdout.write('%s\\n' % command)\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tself.wfile.write('neighbor %s requested\\n' % command.split(' ')[1])\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command.startswith('show '):\n\t\t\t\tsys.stdout.write('%s\\n' % command)\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tself.wfile.write('%s requested\\n' % command.split(' ')[1])\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command in ['shutdown','reload','restart','version']:\n\t\t\t\tsys.stdout.write('%s\\n' % command)\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprompt = True\n\n\t\t\telif command not in ['go','']:\n\t\t\t\tself.wfile.write('unknown command [%s], try: help\\n' % command)\n\t\t\t\tself.wfile.flush()\n\t\t\t\tprompt = True\n\n\t\t\ttry:\n\t\t\t\tr,_,_ = select.select([sys.stdin], [], [], 1.0)\n\t\t\texcept select.error:\n\t\t\t\traise KeyboardInterrupt('SIGNAL received in select')\n\n\t\t\tif r:\n\t\t\t\tself.wfile.write('\\n')\n\n\t\t\twhile r:\n\t\t\t\t# Can not use readline with select.\n\t\t\t\t# From http://stackoverflow.com/questions/5486717/python-select-doesnt-signal-all-input-from-pipe\n\t\t\t\t# 
Note that internally file.readlines([size]) loops and invokes the read() syscall more than once, attempting to fill an internal buffer of size. The first call to read() will immediately return, since select() indicated the fd was readable. However the 2nd call will block until data is available, which defeats the purpose of using select. In any case it is tricky to use file.readlines([size]) in an asynchronous app.\n\t\t\t\tresponse = os.read(sys.stdin.fileno(),4096)\n\t\t\t\t# this should not happen as select informed us of data to read but it seems it does\n\t\t\t\tif not response:\n\t\t\t\t\tbreak\n\t\t\t\tself.wfile.write(response)\n\t\t\t\tprompt = True\n\t\t\t\ttime.sleep(0.1)\n\t\t\t\ttry:\n\t\t\t\t\tr,_,_ = select.select([sys.stdin], [], [], 1.0)\n\t\t\t\texcept select.error:\n\t\t\t\t\traise KeyboardInterrupt('SIGNAL received in select')\n\t\t\tcontinue\n\ndef timed (message):\n\tnow = time.strftime('%a, %d %b %Y %H:%M:%S',time.localtime())\n\treturn \"%s | %-8s | %-6d | %-13s | %s\" % (now,'FORKED',os.getpid(),'tcp-server',message)\n\ndef sig (signum, frame):\n\t# outch rude but prevent silly trace on exit if waiting for a read on stdin :p\n\tos.kill(os.getpid(),signal.SIGKILL)\n\nsignal.signal(signal.SIGINT, sig)\nsignal.signal(signal.SIGTERM, sig)\n\ncount = 0\nconnected = False\n\nclass Server (TCPServer):\n\tdef server_activate (self):\n\t\tprint >> sys.stderr, timed('tcp-server listening on %s:%d' % (host,port))\n\t\tsys.stderr.flush()\n\t\tTCPServer.server_activate(self)\n\nwhile not connected:\n\ttry:\n\t\tserver = Server((host, port), Control)\n\t\tconnected = True\n\texcept socket.error:\n\t\tcount += 1\n\t\tif count % 1 == 0:\n\t\t\tprint >> sys.stderr, timed('tcp-server still trying to bind to %s:%d' % (host,port))\n\t\t# we can not connect to the socket, retrying (happens if respawns too quickly)\n\t\ttime.sleep(1)\nserver.serve_forever()\n\n",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
player_resource = PlayerResource()
game_resource = GameResource()
urlpatterns = [url('^$', views.index, name='index'), url('^api/', include(
player_resource.urls)), url('^api/', include(game_resource.urls))]
<|reserved_special_token_1|>
from django.conf.urls import url, include
from api.resources import PlayerResource, GameResource
from . import views
player_resource = PlayerResource()
game_resource = GameResource()
urlpatterns = [url('^$', views.index, name='index'), url('^api/', include(
player_resource.urls)), url('^api/', include(game_resource.urls))]
<|reserved_special_token_1|>
from django.conf.urls import url, include
from api.resources import PlayerResource, GameResource
from . import views
player_resource = PlayerResource()
game_resource = GameResource()
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^api/', include(player_resource.urls)),
url(r'^api/', include(game_resource.urls)),
]
|
flexible
|
{
"blob_id": "ff959a388438a6d9c6d418e28c676ec3fd196ea0",
"index": 6076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\nurlpatterns = [url('^$', views.index, name='index'), url('^api/', include(\n player_resource.urls)), url('^api/', include(game_resource.urls))]\n",
"step-3": "from django.conf.urls import url, include\nfrom api.resources import PlayerResource, GameResource\nfrom . import views\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\nurlpatterns = [url('^$', views.index, name='index'), url('^api/', include(\n player_resource.urls)), url('^api/', include(game_resource.urls))]\n",
"step-4": "from django.conf.urls import url, include\nfrom api.resources import PlayerResource, GameResource\nfrom . import views\n\nplayer_resource = PlayerResource()\ngame_resource = GameResource()\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^api/', include(player_resource.urls)),\n url(r'^api/', include(game_resource.urls)),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def getAllMembersFromDB(**kwargs):
"""Finds and returns all the registered members"""
isResponseParsed = kwargs.get('isParsed', False)
logging.info('Trying to find all the users')
try:
rawMembersData = Member.objects()
parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for
rawMember in rawMembersData]
logging.info('Found all the users')
if not isResponseParsed:
return parsedMembers
resp = [parsedMember.dict(exclude={'mongoDocument'}) for
parsedMember in parsedMembers]
return parseControllerResponse(data=resp, statuscode=200, message=
'Successfully found the users')
except Exception as e:
helpfulErrorMessage = "Couldn't find all the users due to " + e
logging.error(helpfulErrorMessage)
if isResponseParsed:
return parseControllerResponse(statuscode=500, message=
'Something went wrong, try again later', error=
helpfulErrorMessage)
raise helpfulErrorMessage
def getMemberFromDiscordHandle(discordHandle: str):
"""Finds and returns the user with the given discord handle, if
such a user doesn't exist, return None"""
try:
member_ = Member.objects(discordHandle=discordHandle).first()
assert member_
member = MemberInDBSchema(**memberHelper(member_))
return member
except AssertionError as _:
return None
except Exception as e:
raise Exception(
"Couldn't find a user with the discord handle {}, due to {}"
.format(discordHandle, e))
<|reserved_special_token_0|>
def getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):
"""Finds and returns the user with the given id, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
logging.info('Trying to find the user with the id=' + id)
try:
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the id={}'.format(memberHelper
(user), id))
logging.info('Found the user with id=' + id)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with id={} does not exist'.format(id))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with id={} does not exist'.format(id))
return None
except Exception as e:
helpfulErrorMsg = ("Couldn't find a user with the userId {}, due to {}"
.format(id, e))
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getAllMembersFromDB(**kwargs):
"""Finds and returns all the registered members"""
isResponseParsed = kwargs.get('isParsed', False)
logging.info('Trying to find all the users')
try:
rawMembersData = Member.objects()
parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for
rawMember in rawMembersData]
logging.info('Found all the users')
if not isResponseParsed:
return parsedMembers
resp = [parsedMember.dict(exclude={'mongoDocument'}) for
parsedMember in parsedMembers]
return parseControllerResponse(data=resp, statuscode=200, message=
'Successfully found the users')
except Exception as e:
helpfulErrorMessage = "Couldn't find all the users due to " + e
logging.error(helpfulErrorMessage)
if isResponseParsed:
return parseControllerResponse(statuscode=500, message=
'Something went wrong, try again later', error=
helpfulErrorMessage)
raise helpfulErrorMessage
def getMemberFromDiscordHandle(discordHandle: str):
"""Finds and returns the user with the given discord handle, if
such a user doesn't exist, return None"""
try:
member_ = Member.objects(discordHandle=discordHandle).first()
assert member_
member = MemberInDBSchema(**memberHelper(member_))
return member
except AssertionError as _:
return None
except Exception as e:
raise Exception(
"Couldn't find a user with the discord handle {}, due to {}"
.format(discordHandle, e))
def getMemberFromRollNumber(rollNumber: int, **kwargs):
"""Finds and returns the user with the given roll number, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
try:
user = Member.objects(rollno=rollNumber).first()
assert user
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the rollno={}'.format(
memberHelper(user), rollNumber))
logging.info('Found the user with rollNumber =' + rollNumber)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with roll numer={} does not exist'.format(
rollNumber))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with rollnumber={} does not exist'.format(rollNumber))
return None
except Exception as e:
helpfulErrorMsg = (
f"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}"
)
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
def getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):
"""Finds and returns the user with the given id, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
logging.info('Trying to find the user with the id=' + id)
try:
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the id={}'.format(memberHelper
(user), id))
logging.info('Found the user with id=' + id)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with id={} does not exist'.format(id))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with id={} does not exist'.format(id))
return None
except Exception as e:
helpfulErrorMsg = ("Couldn't find a user with the userId {}, due to {}"
.format(id, e))
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getAllMembersFromDB(**kwargs):
"""Finds and returns all the registered members"""
isResponseParsed = kwargs.get('isParsed', False)
logging.info('Trying to find all the users')
try:
rawMembersData = Member.objects()
parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for
rawMember in rawMembersData]
logging.info('Found all the users')
if not isResponseParsed:
return parsedMembers
resp = [parsedMember.dict(exclude={'mongoDocument'}) for
parsedMember in parsedMembers]
return parseControllerResponse(data=resp, statuscode=200, message=
'Successfully found the users')
except Exception as e:
helpfulErrorMessage = "Couldn't find all the users due to " + e
logging.error(helpfulErrorMessage)
if isResponseParsed:
return parseControllerResponse(statuscode=500, message=
'Something went wrong, try again later', error=
helpfulErrorMessage)
raise helpfulErrorMessage
def getMemberFromDiscordHandle(discordHandle: str):
"""Finds and returns the user with the given discord handle, if
such a user doesn't exist, return None"""
try:
member_ = Member.objects(discordHandle=discordHandle).first()
assert member_
member = MemberInDBSchema(**memberHelper(member_))
return member
except AssertionError as _:
return None
except Exception as e:
raise Exception(
"Couldn't find a user with the discord handle {}, due to {}"
.format(discordHandle, e))
def getMemberFromRollNumber(rollNumber: int, **kwargs):
"""Finds and returns the user with the given roll number, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
try:
user = Member.objects(rollno=rollNumber).first()
assert user
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the rollno={}'.format(
memberHelper(user), rollNumber))
logging.info('Found the user with rollNumber =' + rollNumber)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with roll numer={} does not exist'.format(
rollNumber))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with rollnumber={} does not exist'.format(rollNumber))
return None
except Exception as e:
helpfulErrorMsg = (
f"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}"
)
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
def getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):
"""Finds and returns the user with the given id, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
logging.info('Trying to find the user with the id=' + id)
try:
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the id={}'.format(memberHelper
(user), id))
logging.info('Found the user with id=' + id)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with id={} does not exist'.format(id))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with id={} does not exist'.format(id))
return None
except Exception as e:
helpfulErrorMsg = ("Couldn't find a user with the userId {}, due to {}"
.format(id, e))
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
def updateMemberWithGivenDetails(data: UpdateMemberSchema, userId: Union[
ObjectId, str], **kwargs):
"""Finds the user with the given data, and updates their details,
raises an error if the roll number is different"""
isResponseParsed = kwargs.get('isParsed', False)
try:
user: Member = getMemberWithGivenId(id=userId, rawData=True)
assert user, 'Not Found'
assert user.rollno == data.rollno, 'Roll Number Mismatch'
user.name = data.name if data.name else user.name
user.discordHandle = (data.discordHandle if data.discordHandle else
user.discordHandle)
user.batch = data.batch if data.batch else user.batch
if data.password:
user.password = CreateMemberSchema.hashGivenText(data.password)
user.save()
logging.info('successfully updated user data')
if isResponseParsed:
return parseControllerResponse(data=MemberInDBSchema(**
memberHelper(user)).dict(exclude={'mongoDocument'}),
statuscode=200, message='Successfully updated user details')
return True
except AssertionError as err:
if err == 'Not Found':
helpfulErrorMsg = f"A user with userId = {userId!r} doesn't exist"
logging.warn(helpfulErrorMsg)
if not isResponseParsed:
return None
return parseControllerResponse(data=None, statuscode=400,
message=helpfulErrorMsg, error=helpfulErrorMsg)
if err == 'Roll Number Mismatch':
helpfulErrorMsg = (
f"You cannot change a user's roll number after creating it.")
if not isResponseParsed:
return None
return parseControllerResponse(data=None, statuscode=400,
message=helpfulErrorMsg, error=helpfulErrorMsg)
except Exception as e:
helpfulErrorMsg = (
f"Couldn't update user={data.dict()} data, because e={e!r}")
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
<|reserved_special_token_1|>
import logging
from bson import ObjectId
from typing import Union
from app.helper import parseControllerResponse
from models.members import Member
from schema.members import CreateMemberSchema, MemberInDBSchema, UpdateMemberSchema, memberHelper
def getAllMembersFromDB(**kwargs):
"""Finds and returns all the registered members"""
isResponseParsed = kwargs.get('isParsed', False)
logging.info('Trying to find all the users')
try:
rawMembersData = Member.objects()
parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for
rawMember in rawMembersData]
logging.info('Found all the users')
if not isResponseParsed:
return parsedMembers
resp = [parsedMember.dict(exclude={'mongoDocument'}) for
parsedMember in parsedMembers]
return parseControllerResponse(data=resp, statuscode=200, message=
'Successfully found the users')
except Exception as e:
helpfulErrorMessage = "Couldn't find all the users due to " + e
logging.error(helpfulErrorMessage)
if isResponseParsed:
return parseControllerResponse(statuscode=500, message=
'Something went wrong, try again later', error=
helpfulErrorMessage)
raise helpfulErrorMessage
def getMemberFromDiscordHandle(discordHandle: str):
"""Finds and returns the user with the given discord handle, if
such a user doesn't exist, return None"""
try:
member_ = Member.objects(discordHandle=discordHandle).first()
assert member_
member = MemberInDBSchema(**memberHelper(member_))
return member
except AssertionError as _:
return None
except Exception as e:
raise Exception(
"Couldn't find a user with the discord handle {}, due to {}"
.format(discordHandle, e))
def getMemberFromRollNumber(rollNumber: int, **kwargs):
"""Finds and returns the user with the given roll number, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
try:
user = Member.objects(rollno=rollNumber).first()
assert user
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the rollno={}'.format(
memberHelper(user), rollNumber))
logging.info('Found the user with rollNumber =' + rollNumber)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with roll numer={} does not exist'.format(
rollNumber))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with rollnumber={} does not exist'.format(rollNumber))
return None
except Exception as e:
helpfulErrorMsg = (
f"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}"
)
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
def getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):
"""Finds and returns the user with the given id, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get('isParsed', False)
rawData = kwargs.get('rawData', False)
logging.info('Trying to find the user with the id=' + id)
try:
user = Member.objects(id=id).first()
assert user
logging.debug('Found a user {}, with the id={}'.format(memberHelper
(user), id))
logging.info('Found the user with id=' + id)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(data=MemberInDBSchema(**memberHelper
(user)).dict(exclude={'mongoDocument'}), statuscode=200,
message='Successfully found the user')
except AssertionError as _:
logging.info('A user with id={} does not exist'.format(id))
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=404,
message='User not found', error=
'A user with id={} does not exist'.format(id))
return None
except Exception as e:
helpfulErrorMsg = ("Couldn't find a user with the userId {}, due to {}"
.format(id, e))
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
def updateMemberWithGivenDetails(data: UpdateMemberSchema, userId: Union[
ObjectId, str], **kwargs):
"""Finds the user with the given data, and updates their details,
raises an error if the roll number is different"""
isResponseParsed = kwargs.get('isParsed', False)
try:
user: Member = getMemberWithGivenId(id=userId, rawData=True)
assert user, 'Not Found'
assert user.rollno == data.rollno, 'Roll Number Mismatch'
user.name = data.name if data.name else user.name
user.discordHandle = (data.discordHandle if data.discordHandle else
user.discordHandle)
user.batch = data.batch if data.batch else user.batch
if data.password:
user.password = CreateMemberSchema.hashGivenText(data.password)
user.save()
logging.info('successfully updated user data')
if isResponseParsed:
return parseControllerResponse(data=MemberInDBSchema(**
memberHelper(user)).dict(exclude={'mongoDocument'}),
statuscode=200, message='Successfully updated user details')
return True
except AssertionError as err:
if err == 'Not Found':
helpfulErrorMsg = f"A user with userId = {userId!r} doesn't exist"
logging.warn(helpfulErrorMsg)
if not isResponseParsed:
return None
return parseControllerResponse(data=None, statuscode=400,
message=helpfulErrorMsg, error=helpfulErrorMsg)
if err == 'Roll Number Mismatch':
helpfulErrorMsg = (
f"You cannot change a user's roll number after creating it.")
if not isResponseParsed:
return None
return parseControllerResponse(data=None, statuscode=400,
message=helpfulErrorMsg, error=helpfulErrorMsg)
except Exception as e:
helpfulErrorMsg = (
f"Couldn't update user={data.dict()} data, because e={e!r}")
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(data=None, statuscode=500,
message='Something went wrong, try again later.', error=
helpfulErrorMsg)
raise helpfulErrorMsg
<|reserved_special_token_1|>
import logging
from bson import ObjectId
from typing import Union
from app.helper import parseControllerResponse
from models.members import Member
from schema.members import (
CreateMemberSchema,
MemberInDBSchema,
UpdateMemberSchema,
memberHelper,
)
def getAllMembersFromDB(**kwargs):
"""Finds and returns all the registered members"""
isResponseParsed = kwargs.get("isParsed", False)
logging.info("Trying to find all the users")
try:
rawMembersData = Member.objects()
parsedMembers = [
MemberInDBSchema(**memberHelper(rawMember)) for rawMember in rawMembersData
]
logging.info("Found all the users")
if not isResponseParsed:
return parsedMembers
resp = [
parsedMember.dict(exclude={"mongoDocument"})
for parsedMember in parsedMembers
]
return parseControllerResponse(
data=resp, statuscode=200, message="Successfully found the users"
)
except Exception as e:
helpfulErrorMessage = "Couldn't find all the users due to " + e
logging.error(helpfulErrorMessage)
if isResponseParsed:
return parseControllerResponse(
statuscode=500,
message="Something went wrong, try again later",
error=helpfulErrorMessage,
)
raise helpfulErrorMessage
def getMemberFromDiscordHandle(discordHandle: str):
"""Finds and returns the user with the given discord handle, if
such a user doesn't exist, return None"""
try:
member_ = Member.objects(discordHandle=discordHandle).first()
assert member_
member = MemberInDBSchema(**memberHelper(member_))
return member
except AssertionError as _:
# if the member is not found, raise a ValueError
return None
except Exception as e:
raise Exception(
"Couldn't find a user with the discord handle \
{}, due to {}".format(
discordHandle, e
)
)
def getMemberFromRollNumber(rollNumber: int, **kwargs):
"""Finds and returns the user with the given roll number, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get("isParsed", False)
rawData = kwargs.get("rawData", False)
try:
user = Member.objects(rollno=rollNumber).first()
assert user
user = Member.objects(id=id).first()
assert user
logging.debug(
"Found a user {}, with the rollno={}".format(memberHelper(user), rollNumber)
)
logging.info("Found the user with rollNumber =" + rollNumber)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(
data=(MemberInDBSchema(**memberHelper(user))).dict(
exclude={"mongoDocument"}
),
statuscode=200,
message="Successfully found the user",
)
except AssertionError as _:
# user was not found, return none or parsed response
# ! its the person who called this func's responsibility to create an error
logging.info("A user with roll numer={} does not exist".format(rollNumber))
if isResponseParsed:
return parseControllerResponse(
data=None,
statuscode=404,
message="User not found",
error="A user with rollnumber={} does not exist".format(rollNumber),
)
return None
except Exception as e:
helpfulErrorMsg = f"Couldn't find a user with the {rollNumber = }, due to {e}"
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(
data=None,
statuscode=500,
message="Something went wrong, try again later.",
error=helpfulErrorMsg,
)
raise helpfulErrorMsg
def getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):
"""Finds and returns the user with the given id, if
such a user doesn't exist, return None"""
isResponseParsed = kwargs.get("isParsed", False)
rawData = kwargs.get("rawData", False)
logging.info("Trying to find the user with the id=" + id)
try:
user = Member.objects(id=id).first()
assert user
logging.debug("Found a user {}, with the id={}".format(memberHelper(user), id))
logging.info("Found the user with id=" + id)
if not isResponseParsed:
return user if rawData else MemberInDBSchema(**memberHelper(user))
return parseControllerResponse(
data=(MemberInDBSchema(**memberHelper(user))).dict(
exclude={"mongoDocument"}
),
statuscode=200,
message="Successfully found the user",
)
except AssertionError as _:
# user was not found, return none or parsed response
logging.info("A user with id={} does not exist".format(id))
if isResponseParsed:
return parseControllerResponse(
data=None,
statuscode=404,
message="User not found",
error="A user with id={} does not exist".format(id),
)
return None
except Exception as e:
helpfulErrorMsg = "Couldn't find a user with the userId {}, due to {}".format(
id, e
)
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(
data=None,
statuscode=500,
message="Something went wrong, try again later.",
error=helpfulErrorMsg,
)
raise helpfulErrorMsg
def updateMemberWithGivenDetails(
data: UpdateMemberSchema, userId: Union[ObjectId, str], **kwargs
):
"""Finds the user with the given data, and updates their details,
raises an error if the roll number is different"""
isResponseParsed = kwargs.get("isParsed", False)
try:
user: Member = getMemberWithGivenId(id=userId, rawData=True)
assert user, "Not Found"
# A user cannot change roll number after creating a doc
assert user.rollno == data.rollno, "Roll Number Mismatch"
user.name = data.name if data.name else user.name
user.discordHandle = (
data.discordHandle if data.discordHandle else user.discordHandle
)
user.batch = data.batch if data.batch else user.batch
if data.password:
user.password = CreateMemberSchema.hashGivenText(data.password)
user.save()
logging.info("successfully updated user data")
if isResponseParsed:
return parseControllerResponse(
data=(MemberInDBSchema(**memberHelper(user))).dict(
exclude={"mongoDocument"}
),
statuscode=200,
message="Successfully updated user details",
)
return True
except AssertionError as err:
if err == "Not Found":
helpfulErrorMsg = f"A user with {userId = } doesn't exist"
logging.warn(helpfulErrorMsg)
if not isResponseParsed:
return None
return parseControllerResponse(
data=None,
statuscode=400,
message=helpfulErrorMsg,
error=helpfulErrorMsg,
)
if err == "Roll Number Mismatch":
helpfulErrorMsg = (
f"You cannot change a user's roll number after creating it."
)
if not isResponseParsed:
return None
return parseControllerResponse(
data=None,
statuscode=400,
message=helpfulErrorMsg,
error=helpfulErrorMsg,
)
except Exception as e:
helpfulErrorMsg = f"Couldn't update user={data.dict()} data, because {e=}"
logging.error(helpfulErrorMsg)
if isResponseParsed:
return parseControllerResponse(
data=None,
statuscode=500,
message="Something went wrong, try again later.",
error=helpfulErrorMsg,
)
raise helpfulErrorMsg
|
flexible
|
{
"blob_id": "95f9e9a8f681679f56c3755199fba7d654af85e8",
"index": 1937,
"step-1": "<mask token>\n\n\ndef getAllMembersFromDB(**kwargs):\n \"\"\"Finds and returns all the registered members\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n logging.info('Trying to find all the users')\n try:\n rawMembersData = Member.objects()\n parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for\n rawMember in rawMembersData]\n logging.info('Found all the users')\n if not isResponseParsed:\n return parsedMembers\n resp = [parsedMember.dict(exclude={'mongoDocument'}) for\n parsedMember in parsedMembers]\n return parseControllerResponse(data=resp, statuscode=200, message=\n 'Successfully found the users')\n except Exception as e:\n helpfulErrorMessage = \"Couldn't find all the users due to \" + e\n logging.error(helpfulErrorMessage)\n if isResponseParsed:\n return parseControllerResponse(statuscode=500, message=\n 'Something went wrong, try again later', error=\n helpfulErrorMessage)\n raise helpfulErrorMessage\n\n\ndef getMemberFromDiscordHandle(discordHandle: str):\n \"\"\"Finds and returns the user with the given discord handle, if\n such a user doesn't exist, return None\"\"\"\n try:\n member_ = Member.objects(discordHandle=discordHandle).first()\n assert member_\n member = MemberInDBSchema(**memberHelper(member_))\n return member\n except AssertionError as _:\n return None\n except Exception as e:\n raise Exception(\n \"Couldn't find a user with the discord handle {}, due to {}\"\n .format(discordHandle, e))\n\n\n<mask token>\n\n\ndef getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):\n \"\"\"Finds and returns the user with the given id, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n logging.info('Trying to find the user with the id=' + id)\n try:\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the id={}'.format(memberHelper\n (user), id))\n logging.info('Found the user with id=' + 
id)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with id={} does not exist'.format(id))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with id={} does not exist'.format(id))\n return None\n except Exception as e:\n helpfulErrorMsg = (\"Couldn't find a user with the userId {}, due to {}\"\n .format(id, e))\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getAllMembersFromDB(**kwargs):\n \"\"\"Finds and returns all the registered members\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n logging.info('Trying to find all the users')\n try:\n rawMembersData = Member.objects()\n parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for\n rawMember in rawMembersData]\n logging.info('Found all the users')\n if not isResponseParsed:\n return parsedMembers\n resp = [parsedMember.dict(exclude={'mongoDocument'}) for\n parsedMember in parsedMembers]\n return parseControllerResponse(data=resp, statuscode=200, message=\n 'Successfully found the users')\n except Exception as e:\n helpfulErrorMessage = \"Couldn't find all the users due to \" + e\n logging.error(helpfulErrorMessage)\n if isResponseParsed:\n return parseControllerResponse(statuscode=500, message=\n 'Something went wrong, try again later', error=\n helpfulErrorMessage)\n raise helpfulErrorMessage\n\n\ndef getMemberFromDiscordHandle(discordHandle: str):\n \"\"\"Finds and returns the user with the given discord handle, if\n such a user doesn't exist, return None\"\"\"\n try:\n member_ = Member.objects(discordHandle=discordHandle).first()\n assert member_\n member = MemberInDBSchema(**memberHelper(member_))\n return member\n except AssertionError as _:\n return None\n except Exception as e:\n raise Exception(\n \"Couldn't find a user with the discord handle {}, due to {}\"\n .format(discordHandle, e))\n\n\ndef getMemberFromRollNumber(rollNumber: int, **kwargs):\n \"\"\"Finds and returns the user with the given roll number, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n try:\n user = Member.objects(rollno=rollNumber).first()\n assert user\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the rollno={}'.format(\n memberHelper(user), rollNumber))\n logging.info('Found the user with 
rollNumber =' + rollNumber)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with roll numer={} does not exist'.format(\n rollNumber))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with rollnumber={} does not exist'.format(rollNumber))\n return None\n except Exception as e:\n helpfulErrorMsg = (\n f\"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}\"\n )\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\ndef getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):\n \"\"\"Finds and returns the user with the given id, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n logging.info('Trying to find the user with the id=' + id)\n try:\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the id={}'.format(memberHelper\n (user), id))\n logging.info('Found the user with id=' + id)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with id={} does not exist'.format(id))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with id={} does not exist'.format(id))\n return 
None\n except Exception as e:\n helpfulErrorMsg = (\"Couldn't find a user with the userId {}, due to {}\"\n .format(id, e))\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getAllMembersFromDB(**kwargs):\n \"\"\"Finds and returns all the registered members\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n logging.info('Trying to find all the users')\n try:\n rawMembersData = Member.objects()\n parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for\n rawMember in rawMembersData]\n logging.info('Found all the users')\n if not isResponseParsed:\n return parsedMembers\n resp = [parsedMember.dict(exclude={'mongoDocument'}) for\n parsedMember in parsedMembers]\n return parseControllerResponse(data=resp, statuscode=200, message=\n 'Successfully found the users')\n except Exception as e:\n helpfulErrorMessage = \"Couldn't find all the users due to \" + e\n logging.error(helpfulErrorMessage)\n if isResponseParsed:\n return parseControllerResponse(statuscode=500, message=\n 'Something went wrong, try again later', error=\n helpfulErrorMessage)\n raise helpfulErrorMessage\n\n\ndef getMemberFromDiscordHandle(discordHandle: str):\n \"\"\"Finds and returns the user with the given discord handle, if\n such a user doesn't exist, return None\"\"\"\n try:\n member_ = Member.objects(discordHandle=discordHandle).first()\n assert member_\n member = MemberInDBSchema(**memberHelper(member_))\n return member\n except AssertionError as _:\n return None\n except Exception as e:\n raise Exception(\n \"Couldn't find a user with the discord handle {}, due to {}\"\n .format(discordHandle, e))\n\n\ndef getMemberFromRollNumber(rollNumber: int, **kwargs):\n \"\"\"Finds and returns the user with the given roll number, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n try:\n user = Member.objects(rollno=rollNumber).first()\n assert user\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the rollno={}'.format(\n memberHelper(user), rollNumber))\n logging.info('Found the user with 
rollNumber =' + rollNumber)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with roll numer={} does not exist'.format(\n rollNumber))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with rollnumber={} does not exist'.format(rollNumber))\n return None\n except Exception as e:\n helpfulErrorMsg = (\n f\"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}\"\n )\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\ndef getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):\n \"\"\"Finds and returns the user with the given id, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n logging.info('Trying to find the user with the id=' + id)\n try:\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the id={}'.format(memberHelper\n (user), id))\n logging.info('Found the user with id=' + id)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with id={} does not exist'.format(id))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with id={} does not exist'.format(id))\n return 
None\n except Exception as e:\n helpfulErrorMsg = (\"Couldn't find a user with the userId {}, due to {}\"\n .format(id, e))\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\ndef updateMemberWithGivenDetails(data: UpdateMemberSchema, userId: Union[\n ObjectId, str], **kwargs):\n \"\"\"Finds the user with the given data, and updates their details,\n raises an error if the roll number is different\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n try:\n user: Member = getMemberWithGivenId(id=userId, rawData=True)\n assert user, 'Not Found'\n assert user.rollno == data.rollno, 'Roll Number Mismatch'\n user.name = data.name if data.name else user.name\n user.discordHandle = (data.discordHandle if data.discordHandle else\n user.discordHandle)\n user.batch = data.batch if data.batch else user.batch\n if data.password:\n user.password = CreateMemberSchema.hashGivenText(data.password)\n user.save()\n logging.info('successfully updated user data')\n if isResponseParsed:\n return parseControllerResponse(data=MemberInDBSchema(**\n memberHelper(user)).dict(exclude={'mongoDocument'}),\n statuscode=200, message='Successfully updated user details')\n return True\n except AssertionError as err:\n if err == 'Not Found':\n helpfulErrorMsg = f\"A user with userId = {userId!r} doesn't exist\"\n logging.warn(helpfulErrorMsg)\n if not isResponseParsed:\n return None\n return parseControllerResponse(data=None, statuscode=400,\n message=helpfulErrorMsg, error=helpfulErrorMsg)\n if err == 'Roll Number Mismatch':\n helpfulErrorMsg = (\n f\"You cannot change a user's roll number after creating it.\")\n if not isResponseParsed:\n return None\n return parseControllerResponse(data=None, statuscode=400,\n message=helpfulErrorMsg, error=helpfulErrorMsg)\n except Exception as e:\n helpfulErrorMsg = (\n f\"Couldn't 
update user={data.dict()} data, because e={e!r}\")\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n",
"step-4": "import logging\nfrom bson import ObjectId\nfrom typing import Union\nfrom app.helper import parseControllerResponse\nfrom models.members import Member\nfrom schema.members import CreateMemberSchema, MemberInDBSchema, UpdateMemberSchema, memberHelper\n\n\ndef getAllMembersFromDB(**kwargs):\n \"\"\"Finds and returns all the registered members\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n logging.info('Trying to find all the users')\n try:\n rawMembersData = Member.objects()\n parsedMembers = [MemberInDBSchema(**memberHelper(rawMember)) for\n rawMember in rawMembersData]\n logging.info('Found all the users')\n if not isResponseParsed:\n return parsedMembers\n resp = [parsedMember.dict(exclude={'mongoDocument'}) for\n parsedMember in parsedMembers]\n return parseControllerResponse(data=resp, statuscode=200, message=\n 'Successfully found the users')\n except Exception as e:\n helpfulErrorMessage = \"Couldn't find all the users due to \" + e\n logging.error(helpfulErrorMessage)\n if isResponseParsed:\n return parseControllerResponse(statuscode=500, message=\n 'Something went wrong, try again later', error=\n helpfulErrorMessage)\n raise helpfulErrorMessage\n\n\ndef getMemberFromDiscordHandle(discordHandle: str):\n \"\"\"Finds and returns the user with the given discord handle, if\n such a user doesn't exist, return None\"\"\"\n try:\n member_ = Member.objects(discordHandle=discordHandle).first()\n assert member_\n member = MemberInDBSchema(**memberHelper(member_))\n return member\n except AssertionError as _:\n return None\n except Exception as e:\n raise Exception(\n \"Couldn't find a user with the discord handle {}, due to {}\"\n .format(discordHandle, e))\n\n\ndef getMemberFromRollNumber(rollNumber: int, **kwargs):\n \"\"\"Finds and returns the user with the given roll number, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n try:\n user = 
Member.objects(rollno=rollNumber).first()\n assert user\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the rollno={}'.format(\n memberHelper(user), rollNumber))\n logging.info('Found the user with rollNumber =' + rollNumber)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as _:\n logging.info('A user with roll numer={} does not exist'.format(\n rollNumber))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with rollnumber={} does not exist'.format(rollNumber))\n return None\n except Exception as e:\n helpfulErrorMsg = (\n f\"Couldn't find a user with the rollNumber = {rollNumber!r}, due to {e}\"\n )\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\ndef getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):\n \"\"\"Finds and returns the user with the given id, if\n such a user doesn't exist, return None\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n rawData = kwargs.get('rawData', False)\n logging.info('Trying to find the user with the id=' + id)\n try:\n user = Member.objects(id=id).first()\n assert user\n logging.debug('Found a user {}, with the id={}'.format(memberHelper\n (user), id))\n logging.info('Found the user with id=' + id)\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n return parseControllerResponse(data=MemberInDBSchema(**memberHelper\n (user)).dict(exclude={'mongoDocument'}), statuscode=200,\n message='Successfully found the user')\n except AssertionError as 
_:\n logging.info('A user with id={} does not exist'.format(id))\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=404,\n message='User not found', error=\n 'A user with id={} does not exist'.format(id))\n return None\n except Exception as e:\n helpfulErrorMsg = (\"Couldn't find a user with the userId {}, due to {}\"\n .format(id, e))\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n\n\ndef updateMemberWithGivenDetails(data: UpdateMemberSchema, userId: Union[\n ObjectId, str], **kwargs):\n \"\"\"Finds the user with the given data, and updates their details,\n raises an error if the roll number is different\"\"\"\n isResponseParsed = kwargs.get('isParsed', False)\n try:\n user: Member = getMemberWithGivenId(id=userId, rawData=True)\n assert user, 'Not Found'\n assert user.rollno == data.rollno, 'Roll Number Mismatch'\n user.name = data.name if data.name else user.name\n user.discordHandle = (data.discordHandle if data.discordHandle else\n user.discordHandle)\n user.batch = data.batch if data.batch else user.batch\n if data.password:\n user.password = CreateMemberSchema.hashGivenText(data.password)\n user.save()\n logging.info('successfully updated user data')\n if isResponseParsed:\n return parseControllerResponse(data=MemberInDBSchema(**\n memberHelper(user)).dict(exclude={'mongoDocument'}),\n statuscode=200, message='Successfully updated user details')\n return True\n except AssertionError as err:\n if err == 'Not Found':\n helpfulErrorMsg = f\"A user with userId = {userId!r} doesn't exist\"\n logging.warn(helpfulErrorMsg)\n if not isResponseParsed:\n return None\n return parseControllerResponse(data=None, statuscode=400,\n message=helpfulErrorMsg, error=helpfulErrorMsg)\n if err == 'Roll Number Mismatch':\n helpfulErrorMsg = (\n f\"You cannot change a user's roll 
number after creating it.\")\n if not isResponseParsed:\n return None\n return parseControllerResponse(data=None, statuscode=400,\n message=helpfulErrorMsg, error=helpfulErrorMsg)\n except Exception as e:\n helpfulErrorMsg = (\n f\"Couldn't update user={data.dict()} data, because e={e!r}\")\n logging.error(helpfulErrorMsg)\n if isResponseParsed:\n return parseControllerResponse(data=None, statuscode=500,\n message='Something went wrong, try again later.', error=\n helpfulErrorMsg)\n raise helpfulErrorMsg\n",
"step-5": "import logging\nfrom bson import ObjectId\nfrom typing import Union\n\nfrom app.helper import parseControllerResponse\n\nfrom models.members import Member\nfrom schema.members import (\n CreateMemberSchema,\n MemberInDBSchema,\n UpdateMemberSchema,\n memberHelper,\n)\n\n\ndef getAllMembersFromDB(**kwargs):\n \"\"\"Finds and returns all the registered members\"\"\"\n\n isResponseParsed = kwargs.get(\"isParsed\", False)\n logging.info(\"Trying to find all the users\")\n\n try:\n rawMembersData = Member.objects()\n\n parsedMembers = [\n MemberInDBSchema(**memberHelper(rawMember)) for rawMember in rawMembersData\n ]\n\n logging.info(\"Found all the users\")\n if not isResponseParsed:\n return parsedMembers\n\n resp = [\n parsedMember.dict(exclude={\"mongoDocument\"})\n for parsedMember in parsedMembers\n ]\n return parseControllerResponse(\n data=resp, statuscode=200, message=\"Successfully found the users\"\n )\n\n except Exception as e:\n helpfulErrorMessage = \"Couldn't find all the users due to \" + e\n\n logging.error(helpfulErrorMessage)\n if isResponseParsed:\n return parseControllerResponse(\n statuscode=500,\n message=\"Something went wrong, try again later\",\n error=helpfulErrorMessage,\n )\n raise helpfulErrorMessage\n\n\ndef getMemberFromDiscordHandle(discordHandle: str):\n \"\"\"Finds and returns the user with the given discord handle, if\n such a user doesn't exist, return None\"\"\"\n try:\n member_ = Member.objects(discordHandle=discordHandle).first()\n assert member_\n member = MemberInDBSchema(**memberHelper(member_))\n return member\n except AssertionError as _:\n # if the member is not found, raise a ValueError\n return None\n except Exception as e:\n raise Exception(\n \"Couldn't find a user with the discord handle \\\n {}, due to {}\".format(\n discordHandle, e\n )\n )\n\n\ndef getMemberFromRollNumber(rollNumber: int, **kwargs):\n \"\"\"Finds and returns the user with the given roll number, if\n such a user doesn't exist, return 
None\"\"\"\n\n isResponseParsed = kwargs.get(\"isParsed\", False)\n rawData = kwargs.get(\"rawData\", False)\n\n try:\n user = Member.objects(rollno=rollNumber).first()\n assert user\n\n user = Member.objects(id=id).first()\n\n assert user\n\n logging.debug(\n \"Found a user {}, with the rollno={}\".format(memberHelper(user), rollNumber)\n )\n logging.info(\"Found the user with rollNumber =\" + rollNumber)\n\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n\n return parseControllerResponse(\n data=(MemberInDBSchema(**memberHelper(user))).dict(\n exclude={\"mongoDocument\"}\n ),\n statuscode=200,\n message=\"Successfully found the user\",\n )\n\n except AssertionError as _:\n # user was not found, return none or parsed response\n # ! its the person who called this func's responsibility to create an error\n logging.info(\"A user with roll numer={} does not exist\".format(rollNumber))\n\n if isResponseParsed:\n return parseControllerResponse(\n data=None,\n statuscode=404,\n message=\"User not found\",\n error=\"A user with rollnumber={} does not exist\".format(rollNumber),\n )\n return None\n except Exception as e:\n helpfulErrorMsg = f\"Couldn't find a user with the {rollNumber = }, due to {e}\"\n\n logging.error(helpfulErrorMsg)\n\n if isResponseParsed:\n return parseControllerResponse(\n data=None,\n statuscode=500,\n message=\"Something went wrong, try again later.\",\n error=helpfulErrorMsg,\n )\n raise helpfulErrorMsg\n\n\ndef getMemberWithGivenId(id: Union[str, ObjectId], **kwargs):\n \"\"\"Finds and returns the user with the given id, if\n such a user doesn't exist, return None\"\"\"\n\n isResponseParsed = kwargs.get(\"isParsed\", False)\n rawData = kwargs.get(\"rawData\", False)\n\n logging.info(\"Trying to find the user with the id=\" + id)\n try:\n\n user = Member.objects(id=id).first()\n\n assert user\n\n logging.debug(\"Found a user {}, with the id={}\".format(memberHelper(user), id))\n logging.info(\"Found 
the user with id=\" + id)\n\n if not isResponseParsed:\n return user if rawData else MemberInDBSchema(**memberHelper(user))\n\n return parseControllerResponse(\n data=(MemberInDBSchema(**memberHelper(user))).dict(\n exclude={\"mongoDocument\"}\n ),\n statuscode=200,\n message=\"Successfully found the user\",\n )\n\n except AssertionError as _:\n # user was not found, return none or parsed response\n logging.info(\"A user with id={} does not exist\".format(id))\n\n if isResponseParsed:\n return parseControllerResponse(\n data=None,\n statuscode=404,\n message=\"User not found\",\n error=\"A user with id={} does not exist\".format(id),\n )\n return None\n\n except Exception as e:\n helpfulErrorMsg = \"Couldn't find a user with the userId {}, due to {}\".format(\n id, e\n )\n logging.error(helpfulErrorMsg)\n\n if isResponseParsed:\n return parseControllerResponse(\n data=None,\n statuscode=500,\n message=\"Something went wrong, try again later.\",\n error=helpfulErrorMsg,\n )\n raise helpfulErrorMsg\n\n\ndef updateMemberWithGivenDetails(\n data: UpdateMemberSchema, userId: Union[ObjectId, str], **kwargs\n):\n \"\"\"Finds the user with the given data, and updates their details,\n raises an error if the roll number is different\"\"\"\n\n isResponseParsed = kwargs.get(\"isParsed\", False)\n\n try:\n user: Member = getMemberWithGivenId(id=userId, rawData=True)\n\n assert user, \"Not Found\"\n\n # A user cannot change roll number after creating a doc\n assert user.rollno == data.rollno, \"Roll Number Mismatch\"\n\n user.name = data.name if data.name else user.name\n user.discordHandle = (\n data.discordHandle if data.discordHandle else user.discordHandle\n )\n user.batch = data.batch if data.batch else user.batch\n\n if data.password:\n user.password = CreateMemberSchema.hashGivenText(data.password)\n\n user.save()\n\n logging.info(\"successfully updated user data\")\n\n if isResponseParsed:\n return parseControllerResponse(\n 
data=(MemberInDBSchema(**memberHelper(user))).dict(\n exclude={\"mongoDocument\"}\n ),\n statuscode=200,\n message=\"Successfully updated user details\",\n )\n\n return True\n\n except AssertionError as err:\n if err == \"Not Found\":\n helpfulErrorMsg = f\"A user with {userId = } doesn't exist\"\n logging.warn(helpfulErrorMsg)\n if not isResponseParsed:\n return None\n return parseControllerResponse(\n data=None,\n statuscode=400,\n message=helpfulErrorMsg,\n error=helpfulErrorMsg,\n )\n if err == \"Roll Number Mismatch\":\n helpfulErrorMsg = (\n f\"You cannot change a user's roll number after creating it.\"\n )\n if not isResponseParsed:\n return None\n return parseControllerResponse(\n data=None,\n statuscode=400,\n message=helpfulErrorMsg,\n error=helpfulErrorMsg,\n )\n\n except Exception as e:\n helpfulErrorMsg = f\"Couldn't update user={data.dict()} data, because {e=}\"\n\n logging.error(helpfulErrorMsg)\n\n if isResponseParsed:\n return parseControllerResponse(\n data=None,\n statuscode=500,\n message=\"Something went wrong, try again later.\",\n error=helpfulErrorMsg,\n )\n raise helpfulErrorMsg\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 2.2.16 on 2020-11-04 12:48
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``course`` app.

    Adds an optional cover-video upload field to ``Course`` and converts
    the ``brief`` field into a CKEditor rich-text upload field.

    NOTE: generated by ``makemigrations`` — the dependency list and the
    operation order must not be edited by hand, or the recorded migration
    state will diverge from the database schema.
    """

    # Must be applied after the previous course-app migration.
    dependencies = [
        ('course', '0002_auto_20201103_1648'),
    ]

    operations = [
        # New optional field: uploaded files land under MEDIA_ROOT/video.
        migrations.AddField(
            model_name='course',
            name='course_video',
            field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),
        ),
        # Widen `brief` into a rich-text field managed by ckeditor_uploader.
        migrations.AlterField(
            model_name='course',
            name='brief',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),
        ),
    ]
|
normal
|
{
"blob_id": "afacc2c54584c070963c4cb3cabbae64bb0e3159",
"index": 1858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-4": "import ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('course', '0002_auto_20201103_1648')]\n operations = [migrations.AddField(model_name='course', name=\n 'course_video', field=models.FileField(blank=True, max_length=255,\n null=True, upload_to='video', verbose_name='封面视频')), migrations.\n AlterField(model_name='course', name='brief', field=\n ckeditor_uploader.fields.RichTextUploadingField(blank=True,\n max_length=2048, null=True, verbose_name='详情介绍'))]\n",
"step-5": "# Generated by Django 2.2.16 on 2020-11-04 12:48\n\nimport ckeditor_uploader.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('course', '0002_auto_20201103_1648'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='course',\n name='course_video',\n field=models.FileField(blank=True, max_length=255, null=True, upload_to='video', verbose_name='封面视频'),\n ),\n migrations.AlterField(\n model_name='course',\n name='brief',\n field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, max_length=2048, null=True, verbose_name='详情介绍'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
from sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean, DateTime
from sqlalchemy import and_, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker, scoped_session, load_only
from sqlalchemy.pool import NullPool
from datetime import datetime
# Declarative base class that every ORM model below inherits from.
Base = declarative_base()
# Single-letter day labels indexed by MySQL WEEKDAY() (0 = Monday .. 6 = Sunday);
# used by UsageData.get_bikes_for_week to label chart rows.
days = ['M','T','W','T','F', 'S', 'S']
# using sqlalchemy declare a class for each table in our database.
class Station(Base):
    """ORM model holding the static metadata for one dublinbikes station
    (location, name, banking facilities) plus a dynamic relationship to
    its usage snapshots."""
    __tablename__ = "station"

    # The JCDecaux station number is used directly as the primary key.
    number = Column(Integer, primary_key=True, autoincrement=False)
    contract_name = Column(String(250), nullable=False)
    name = Column(String(250), nullable=False)
    address = Column(String(250), nullable=False)
    position_lat = Column(Float, nullable=False)
    position_long = Column(Float, nullable=False)
    banking = Column(Boolean, nullable=True)
    bonus = Column(Boolean, nullable=True)
    # lazy="dynamic" makes station_usage a query object, so it can be
    # filtered/ordered in the database rather than loaded wholesale.
    station_usage = relationship("UsageData", lazy="dynamic")

    @property
    def last_updated(self):
        """Return the timestamp of this station's most recent usage snapshot.

        Used by the scraper so it only pulls data newer than what is
        already stored. Falls back to the Unix epoch when the station has
        no usage rows yet.

        Performance fix: the previous implementation iterated the whole
        dynamic relationship (loading every usage row into Python) just to
        take a max; the ordering is now done by the database.
        """
        latest = self.station_usage.order_by(UsageData.last_update.desc()).first()
        if latest is None:
            return datetime.fromtimestamp(0)
        return latest.dt_last_update

    @classmethod
    def get_current_station_info(cls, dbsession):
        """Return the latest (last_update, available_bike_stands,
        available_bikes) row for every station."""
        # Sub-query: the id of the newest usage row per station.
        sub = dbsession.query(UsageData.station_id,
                              func.max(UsageData.id).label('max_update')) \
            .group_by(UsageData.station_id).subquery()
        # Join back onto bike_usage to pull only those newest rows.
        # (A single condition needs no and_() wrapper.)
        return dbsession.query(
            UsageData.last_update,
            UsageData.available_bike_stands,
            UsageData.available_bikes) \
            .join(sub, sub.c.max_update == UsageData.id).all()
class UsageData(Base):
    """One snapshot of bicycle availability at a station, as scraped
    from the dublinbikes API."""
    __tablename__ = "bike_usage"

    id = Column(Integer, primary_key=True)
    station_id = Column(Integer, ForeignKey('station.number'))
    status = Column(Boolean, nullable=False)
    bike_stands = Column(Integer, nullable=False)            # total stands at the station
    available_bike_stands = Column(Integer, nullable=False)
    available_bikes = Column(Integer, nullable=False)
    last_update = Column(DateTime, nullable=False)           # when the API reported this snapshot

    @property
    def dt_last_update(self):
        """Return when this snapshot was last updated (datetime).

        Used by the scraper to determine newly updated data.
        """
        return self.last_update

    @dt_last_update.setter
    def dt_last_update(self, val):
        """Store a millisecond Unix timestamp (as supplied by the
        dublinbikes API) as a datetime — i.e. the time at which the
        update was entered."""
        self.last_update = datetime.fromtimestamp(int(val) / 1000)

    @classmethod
    def _hourly_averages(cls, dbsession, station_id, *extra_filters):
        """Shared query used by the per-weekday and per-wet-day charts:
        average bikes/stands grouped by hour-of-day for one station,
        optionally narrowed by extra filter expressions."""
        return dbsession.query(
            func.hour(cls.last_update),
            func.avg(cls.available_bikes),
            func.avg(cls.available_bike_stands)) \
            .filter(cls.station_id == station_id, *extra_filters) \
            .group_by(func.hour(cls.last_update)) \
            .all()

    @classmethod
    def get_bikes_for_weekday(cls, dbsession, weekday, station_id):
        """returns a list of bikes for a provided weekday and station.
        averaged per hour so 24 results."""
        station = [("Time", "Available Bikes", "Available Stands")]
        station_data = cls._hourly_averages(
            dbsession, station_id,
            func.weekday(cls.last_update) == weekday)
        # Parse the query result into a readable list (AVG() comes back
        # as Decimal, so coerce to float); fall back to a zero row so the
        # chart always has data.
        if station_data:
            station.extend([(a, float(b), float(c)) for a, b, c in station_data])
        else:
            station.extend([(0, 0, 0)])
        return station

    @classmethod
    def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):
        """Like get_bikes_for_weekday but restricted to one specific (wet)
        calendar date — *wetdate* is a datetime object, not a weekday."""
        station = [("Time", "Available Bikes", "Available Stands")]
        station_data = cls._hourly_averages(
            dbsession, station_id,
            func.date(cls.last_update) == wetdate.date())
        if station_data:
            station.extend([(a, float(b), float(c)) for a, b, c in station_data])
        else:
            station.extend([(0, 0, 0)])
        return station

    @classmethod
    def get_bikes_for_week(cls, dbsession, station_id):
        """Average available bikes per weekday (Mon..Sun) for a station."""
        station = [("Day", "Available Bikes")]
        station_data = dbsession.query(func.weekday(cls.last_update),
                                       func.avg(cls.available_bikes)) \
            .filter(cls.station_id == station_id) \
            .group_by(func.weekday(cls.last_update)) \
            .all()
        # MySQL WEEKDAY() is 0 = Monday, matching the module-level `days` list.
        if station_data:
            station.extend([(days[a], float(b)) for a, b in station_data])
        else:
            station.extend([(0, 0)])
        return station
class Weather(Base):
    """Weather observations scraped from the OpenWeather API.

    Column names mirror the flattened JSON keys of the API response.
    """
    __tablename__ = "weather"

    id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)
    coord_lon = Column(Float)
    coord_lat = Column(Float)
    weather_id = Column(Integer)
    weather_main = Column(String(45))
    weather_description = Column(String(45))
    weather_icon = Column(String(10))
    base = Column(String(45))
    main_temp = Column(Integer)
    main_pressure = Column(Integer)
    main_humidity = Column(Integer)
    main_temp_min = Column(Integer)
    main_temp_max = Column(Integer)
    visibility = Column(Integer)
    wind_speed = Column(Float)
    wind_deg = Column(Integer)
    clouds_all = Column(Integer)
    dt = Column(DateTime)
    sys_type = Column(Integer)
    sys_id = Column(Integer)
    sys_message = Column(Float)
    sys_country = Column(String(2))
    sys_sunrise = Column(DateTime)
    sys_sunset = Column(DateTime)
    city_id = Column(Integer)
    city_name = Column(String(6))
    cod = Column(Integer)

    @classmethod
    def findWetWeatherDays(cls, dbsession, today):
        """Return the datetime of a rainy observation.

        Prefers a wet day that falls on the same weekday as *today*
        (0 = Monday .. 6 = Sunday); otherwise falls back to the first wet
        day recorded. Returns None when no wet days exist (matching the
        original implicit behavior for an empty result).

        BUG FIX: the original had ``else: return wetDays[0][0]`` inside
        the loop, so it always returned on the very first iteration and
        never scanned past ``wetDays[0]``; the weekday match was dead
        code for all but the first row. The fallback now happens only
        after the whole list has been scanned.
        """
        wetDays = dbsession.query(cls.dt).filter(
            or_(cls.weather_description == "light rain",
                cls.weather_description == "moderate rain")).all()
        # Each row is a one-element tuple (dt,).
        for (wet_day,) in wetDays:
            if wet_day.weekday() == today:
                return wet_day
        return wetDays[0][0] if wetDays else None
# path to DB
# Credentials and host come from environment variables so secrets stay out of source control.
connection_string='mysql+mysqldb://{username}:{password}@{host}:3306/dublinbikesdata'.format(username=os.environ['DatabaseUser'],
                                                                                             password=os.environ['DatabasePassword'],
                                                                                             host=os.environ['DatabaseServer'])
# NullPool: no connection pooling — each checkout opens a fresh MySQL connection.
engine = create_engine(connection_string, poolclass=NullPool)
# create the session using sqlalchemy.
# scoped_session gives thread-local sessions; commits/flushes must be explicit.
db_session = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))
if __name__ == "__main__":
    # Ad-hoc smoke test: run this module directly to dump the merged
    # static + live station info as a list of dicts. Not used by the app.
    station_id = 42

    static_rows = db_session.query(Station.number,
                                   Station.name,
                                   Station.address,
                                   Station.position_lat,
                                   Station.position_long).all()
    live_rows = Station.get_current_station_info(db_session)

    field_names = ['number', 'name', 'address', 'position_lat', 'position_long',
                   'last_update', 'available_bike_stands', 'available_bikes']
    json_data = [dict(zip(field_names, static + live))
                 for static, live in zip(static_rows, live_rows)]
    print(json_data)
|
normal
|
{
"blob_id": "6db0adf25a7cc38c8965c07cc80bde0d82c75d56",
"index": 3955,
"step-1": "<mask token>\n\n\nclass UsageData(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def dt_last_update(self):\n \"\"\"return when was the last update. Once again this is used in the scraper to determine newly updated data.\"\"\"\n return self.last_update\n <mask token>\n\n @classmethod\n def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n \"\"\"returns a list of bikes for a provided weekday and station.\n averaged per hour so 24 results.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.weekday(cls.last_update) ==\n weekday).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):\n \"\"\"very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.\n returns a list of bikes for a provided datetime object (wetdate) and station.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.date(cls.last_update) ==\n wetdate.date()).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_week(cls, dbsession, station_id):\n \"\"\"as method name describes.\n similar to methods above but averaged over week.\"\"\"\n station = [('Day', 'Available Bikes')]\n 
station_data = dbsession.query(func.weekday(cls.last_update), func.\n avg(cls.available_bikes)).filter(cls.station_id == station_id\n ).group_by(func.weekday(cls.last_update)).all()\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0, 0)])\n return station\n\n\nclass Weather(Base):\n \"\"\"holds data scraped from the open weather API.\"\"\"\n __tablename__ = 'weather'\n id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)\n coord_lon = Column(Float)\n coord_lat = Column(Float)\n weather_id = Column(Integer)\n weather_main = Column(String(45))\n weather_description = Column(String(45))\n weather_icon = Column(String(10))\n base = Column(String(45))\n main_temp = Column(Integer)\n main_pressure = Column(Integer)\n main_humidity = Column(Integer)\n main_temp_min = Column(Integer)\n main_temp_max = Column(Integer)\n visibility = Column(Integer)\n wind_speed = Column(Float)\n wind_deg = Column(Integer)\n clouds_all = Column(Integer)\n dt = Column(DateTime)\n sys_type = Column(Integer)\n sys_id = Column(Integer)\n sys_message = Column(Float)\n sys_country = Column(String(2))\n sys_sunrise = Column(DateTime)\n sys_sunset = Column(DateTime)\n city_id = Column(Integer)\n city_name = Column(String(6))\n cod = Column(Integer)\n\n @classmethod\n def findWetWeatherDays(self, dbsession, today):\n \"\"\"finds days where there was wet weather.\"\"\"\n wetDays = dbsession.query(self.dt).filter(or_(self.\n weather_description == 'light rain', self.weather_description ==\n 'moderate rain')).all()\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UsageData(Base):\n <mask token>\n __tablename__ = 'bike_usage'\n id = Column(Integer, primary_key=True)\n station_id = Column(Integer, ForeignKey('station.number'))\n status = Column(Boolean, nullable=False)\n bike_stands = Column(Integer, nullable=False)\n available_bike_stands = Column(Integer, nullable=False)\n available_bikes = Column(Integer, nullable=False)\n last_update = Column(DateTime, nullable=False)\n\n @property\n def dt_last_update(self):\n \"\"\"return when was the last update. Once again this is used in the scraper to determine newly updated data.\"\"\"\n return self.last_update\n\n @dt_last_update.setter\n def dt_last_update(self, val):\n \"\"\"creates a datetime object which is added to the database with an update from the dublinbikes api.\n once again used by the scraper. essentially the adds the time at which the update was entered.\"\"\"\n self.last_update = datetime.fromtimestamp(int(val) / 1000)\n\n @classmethod\n def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n \"\"\"returns a list of bikes for a provided weekday and station.\n averaged per hour so 24 results.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.weekday(cls.last_update) ==\n weekday).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):\n \"\"\"very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.\n returns a list of bikes for a provided datetime object (wetdate) and station.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = 
dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.date(cls.last_update) ==\n wetdate.date()).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_week(cls, dbsession, station_id):\n \"\"\"as method name describes.\n similar to methods above but averaged over week.\"\"\"\n station = [('Day', 'Available Bikes')]\n station_data = dbsession.query(func.weekday(cls.last_update), func.\n avg(cls.available_bikes)).filter(cls.station_id == station_id\n ).group_by(func.weekday(cls.last_update)).all()\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0, 0)])\n return station\n\n\nclass Weather(Base):\n \"\"\"holds data scraped from the open weather API.\"\"\"\n __tablename__ = 'weather'\n id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)\n coord_lon = Column(Float)\n coord_lat = Column(Float)\n weather_id = Column(Integer)\n weather_main = Column(String(45))\n weather_description = Column(String(45))\n weather_icon = Column(String(10))\n base = Column(String(45))\n main_temp = Column(Integer)\n main_pressure = Column(Integer)\n main_humidity = Column(Integer)\n main_temp_min = Column(Integer)\n main_temp_max = Column(Integer)\n visibility = Column(Integer)\n wind_speed = Column(Float)\n wind_deg = Column(Integer)\n clouds_all = Column(Integer)\n dt = Column(DateTime)\n sys_type = Column(Integer)\n sys_id = Column(Integer)\n sys_message = Column(Float)\n sys_country = Column(String(2))\n sys_sunrise = Column(DateTime)\n sys_sunset = Column(DateTime)\n city_id = Column(Integer)\n city_name = Column(String(6))\n cod = Column(Integer)\n\n @classmethod\n def findWetWeatherDays(self, dbsession, today):\n \"\"\"finds 
days where there was wet weather.\"\"\"\n wetDays = dbsession.query(self.dt).filter(or_(self.\n weather_description == 'light rain', self.weather_description ==\n 'moderate rain')).all()\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UsageData(Base):\n \"\"\"holds data about bicycle usage for every station.\"\"\"\n __tablename__ = 'bike_usage'\n id = Column(Integer, primary_key=True)\n station_id = Column(Integer, ForeignKey('station.number'))\n status = Column(Boolean, nullable=False)\n bike_stands = Column(Integer, nullable=False)\n available_bike_stands = Column(Integer, nullable=False)\n available_bikes = Column(Integer, nullable=False)\n last_update = Column(DateTime, nullable=False)\n\n @property\n def dt_last_update(self):\n \"\"\"return when was the last update. Once again this is used in the scraper to determine newly updated data.\"\"\"\n return self.last_update\n\n @dt_last_update.setter\n def dt_last_update(self, val):\n \"\"\"creates a datetime object which is added to the database with an update from the dublinbikes api.\n once again used by the scraper. essentially the adds the time at which the update was entered.\"\"\"\n self.last_update = datetime.fromtimestamp(int(val) / 1000)\n\n @classmethod\n def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n \"\"\"returns a list of bikes for a provided weekday and station.\n averaged per hour so 24 results.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.weekday(cls.last_update) ==\n weekday).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):\n \"\"\"very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.\n returns a list of bikes for a provided datetime object (wetdate) and station.\"\"\"\n station = [('Time', 'Available Bikes', 'Available 
Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.date(cls.last_update) ==\n wetdate.date()).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_week(cls, dbsession, station_id):\n \"\"\"as method name describes.\n similar to methods above but averaged over week.\"\"\"\n station = [('Day', 'Available Bikes')]\n station_data = dbsession.query(func.weekday(cls.last_update), func.\n avg(cls.available_bikes)).filter(cls.station_id == station_id\n ).group_by(func.weekday(cls.last_update)).all()\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0, 0)])\n return station\n\n\nclass Weather(Base):\n \"\"\"holds data scraped from the open weather API.\"\"\"\n __tablename__ = 'weather'\n id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)\n coord_lon = Column(Float)\n coord_lat = Column(Float)\n weather_id = Column(Integer)\n weather_main = Column(String(45))\n weather_description = Column(String(45))\n weather_icon = Column(String(10))\n base = Column(String(45))\n main_temp = Column(Integer)\n main_pressure = Column(Integer)\n main_humidity = Column(Integer)\n main_temp_min = Column(Integer)\n main_temp_max = Column(Integer)\n visibility = Column(Integer)\n wind_speed = Column(Float)\n wind_deg = Column(Integer)\n clouds_all = Column(Integer)\n dt = Column(DateTime)\n sys_type = Column(Integer)\n sys_id = Column(Integer)\n sys_message = Column(Float)\n sys_country = Column(String(2))\n sys_sunrise = Column(DateTime)\n sys_sunset = Column(DateTime)\n city_id = Column(Integer)\n city_name = Column(String(6))\n cod = Column(Integer)\n\n @classmethod\n def findWetWeatherDays(self, 
dbsession, today):\n \"\"\"finds days where there was wet weather.\"\"\"\n wetDays = dbsession.query(self.dt).filter(or_(self.\n weather_description == 'light rain', self.weather_description ==\n 'moderate rain')).all()\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Station(Base):\n <mask token>\n __tablename__ = 'station'\n number = Column(Integer, primary_key=True, autoincrement=False)\n contract_name = Column(String(250), nullable=False)\n name = Column(String(250), nullable=False)\n address = Column(String(250), nullable=False)\n position_lat = Column(Float, nullable=False)\n position_long = Column(Float, nullable=False)\n banking = Column(Boolean, nullable=True)\n bonus = Column(Boolean, nullable=True)\n station_usage = relationship('UsageData', lazy='dynamic')\n\n @property\n def last_updated(self):\n \"\"\"this method is used in the scraper to return the last updated station.\n this lets us pull only updated data.\"\"\"\n try:\n return max(self.station_usage, key=lambda x: x.last_update\n ).dt_last_update\n except ValueError:\n return datetime.fromtimestamp(0)\n\n @classmethod\n def get_current_station_info(cls, dbsession):\n \"\"\"as the method name suggests this returns the up to date station information.\"\"\"\n sub = dbsession.query(UsageData.station_id, func.max(UsageData.id).\n label('max_update')).group_by(UsageData.station_id).subquery()\n return dbsession.query(UsageData.last_update, UsageData.\n available_bike_stands, UsageData.available_bikes).join(sub,\n and_(sub.c.max_update == UsageData.id)).all()\n\n\nclass UsageData(Base):\n \"\"\"holds data about bicycle usage for every station.\"\"\"\n __tablename__ = 'bike_usage'\n id = Column(Integer, primary_key=True)\n station_id = Column(Integer, ForeignKey('station.number'))\n status = Column(Boolean, nullable=False)\n bike_stands = Column(Integer, nullable=False)\n available_bike_stands = Column(Integer, nullable=False)\n available_bikes = Column(Integer, nullable=False)\n last_update = Column(DateTime, nullable=False)\n\n @property\n def dt_last_update(self):\n \"\"\"return when was the last update. 
Once again this is used in the scraper to determine newly updated data.\"\"\"\n return self.last_update\n\n @dt_last_update.setter\n def dt_last_update(self, val):\n \"\"\"creates a datetime object which is added to the database with an update from the dublinbikes api.\n once again used by the scraper. essentially the adds the time at which the update was entered.\"\"\"\n self.last_update = datetime.fromtimestamp(int(val) / 1000)\n\n @classmethod\n def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n \"\"\"returns a list of bikes for a provided weekday and station.\n averaged per hour so 24 results.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.weekday(cls.last_update) ==\n weekday).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):\n \"\"\"very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.\n returns a list of bikes for a provided datetime object (wetdate) and station.\"\"\"\n station = [('Time', 'Available Bikes', 'Available Stands')]\n station_data = dbsession.query(func.hour(cls.last_update), func.avg\n (cls.available_bikes), func.avg(cls.available_bike_stands)).filter(\n cls.station_id == station_id, func.date(cls.last_update) ==\n wetdate.date()).group_by(func.hour(cls.last_update)).all()\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in\n station_data])\n else:\n station.extend([(0, 0, 0)])\n return station\n\n @classmethod\n def get_bikes_for_week(cls, dbsession, station_id):\n \"\"\"as method name describes.\n similar to methods above but averaged over 
week.\"\"\"\n station = [('Day', 'Available Bikes')]\n station_data = dbsession.query(func.weekday(cls.last_update), func.\n avg(cls.available_bikes)).filter(cls.station_id == station_id\n ).group_by(func.weekday(cls.last_update)).all()\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0, 0)])\n return station\n\n\nclass Weather(Base):\n \"\"\"holds data scraped from the open weather API.\"\"\"\n __tablename__ = 'weather'\n id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)\n coord_lon = Column(Float)\n coord_lat = Column(Float)\n weather_id = Column(Integer)\n weather_main = Column(String(45))\n weather_description = Column(String(45))\n weather_icon = Column(String(10))\n base = Column(String(45))\n main_temp = Column(Integer)\n main_pressure = Column(Integer)\n main_humidity = Column(Integer)\n main_temp_min = Column(Integer)\n main_temp_max = Column(Integer)\n visibility = Column(Integer)\n wind_speed = Column(Float)\n wind_deg = Column(Integer)\n clouds_all = Column(Integer)\n dt = Column(DateTime)\n sys_type = Column(Integer)\n sys_id = Column(Integer)\n sys_message = Column(Float)\n sys_country = Column(String(2))\n sys_sunrise = Column(DateTime)\n sys_sunset = Column(DateTime)\n city_id = Column(Integer)\n city_name = Column(String(6))\n cod = Column(Integer)\n\n @classmethod\n def findWetWeatherDays(self, dbsession, today):\n \"\"\"finds days where there was wet weather.\"\"\"\n wetDays = dbsession.query(self.dt).filter(or_(self.\n weather_description == 'light rain', self.weather_description ==\n 'moderate rain')).all()\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]\n\n\n<mask token>\n",
"step-5": "import os\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String, Float, Boolean, DateTime\nfrom sqlalchemy import and_, or_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.orm import sessionmaker, scoped_session, load_only\nfrom sqlalchemy.pool import NullPool\nfrom datetime import datetime\n\nBase = declarative_base()\ndays = ['M','T','W','T','F', 'S', 'S']\n\n# using sqlalchemy declare a class for each table in our database.\nclass Station(Base):\n \"\"\"this one is for storing information about each station.\"\"\"\n __tablename__ = \"station\"\n number = Column(Integer, primary_key=True, autoincrement=False)\n contract_name = Column(String(250), nullable=False)\n name = Column(String(250), nullable=False)\n address = Column(String(250), nullable=False)\n position_lat = Column(Float, nullable=False)\n position_long = Column(Float, nullable=False)\n banking = Column(Boolean, nullable=True)\n bonus = Column(Boolean, nullable=True)\n station_usage = relationship(\"UsageData\", lazy=\"dynamic\")\n\n\n @property\n def last_updated(self):\n \"\"\"this method is used in the scraper to return the last updated station.\n this lets us pull only updated data.\"\"\"\n try:\n return max(self.station_usage, key=lambda x: x.last_update).dt_last_update\n except ValueError:\n return datetime.fromtimestamp(0)\n\n @classmethod\n def get_current_station_info(cls, dbsession):\n \"\"\"as the method name suggests this returns the up to date station information.\"\"\"\n sub = dbsession.query(UsageData.station_id, func.max(UsageData.id).label('max_update')).group_by(\n UsageData.station_id).subquery()\n return dbsession.query(\n UsageData.last_update,\n UsageData.available_bike_stands, UsageData.available_bikes).join(sub, and_(\n sub.c.max_update == UsageData.id)).all()\n\n\nclass UsageData(Base):\n \"\"\"holds data about bicycle usage for every 
station.\"\"\"\n __tablename__ = \"bike_usage\"\n id = Column(Integer, primary_key=True)\n station_id = Column(Integer, ForeignKey('station.number'))\n status = Column(Boolean, nullable=False)\n bike_stands = Column(Integer, nullable=False)\n available_bike_stands = Column(Integer, nullable=False)\n available_bikes = Column(Integer, nullable=False)\n last_update = Column(DateTime, nullable=False)\n\n\n @property\n def dt_last_update(self):\n \"\"\"return when was the last update. Once again this is used in the scraper to determine newly updated data.\"\"\"\n return self.last_update\n\n\n @dt_last_update.setter\n def dt_last_update(self, val):\n \"\"\"creates a datetime object which is added to the database with an update from the dublinbikes api.\n once again used by the scraper. essentially the adds the time at which the update was entered.\"\"\"\n self.last_update = datetime.fromtimestamp(int(val)/1000)\n\n @classmethod\n def get_bikes_for_weekday(cls, dbsession, weekday, station_id):\n \"\"\"returns a list of bikes for a provided weekday and station.\n averaged per hour so 24 results.\"\"\"\n station = [(\"Time\", \"Available Bikes\", \"Available Stands\")]\n\n station_data = dbsession.query(func.hour(cls.last_update),\n func.avg(cls.available_bikes),\n func.avg(cls.available_bike_stands)) \\\n .filter(cls.station_id == station_id,\n func.weekday(cls.last_update) == weekday) \\\n .group_by(func.hour(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in station_data])\n else:\n station.extend([(0,0,0)])\n return station\n\n @classmethod\n def get_bikes_for_wetday(cls, dbsession, wetdate, station_id):\n \"\"\"very similar to get_bikes_for_weekday but not the same: date specified is wetdate not weekday.\n returns a list of bikes for a provided datetime object (wetdate) and station.\"\"\"\n # 
averaged per hour so 24 results.\n station = [(\"Time\", \"Available Bikes\", \"Available Stands\")]\n station_data = dbsession.query(\n func.hour(cls.last_update),\n func.avg(cls.available_bikes),\n func.avg(cls.available_bike_stands))\\\n .filter(cls.station_id == station_id,\n func.date(cls.last_update) == wetdate.date())\\\n .group_by(func.hour(cls.last_update)).all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(a, float(b), float(c)) for a, b, c in station_data])\n else:\n station.extend([(0,0,0)])\n return station\n\n\n @classmethod\n def get_bikes_for_week(cls, dbsession, station_id):\n \"\"\"as method name describes.\n similar to methods above but averaged over week.\"\"\"\n station = [(\"Day\", \"Available Bikes\")]\n station_data = dbsession.query(func.weekday(cls.last_update),\n func.avg(cls.available_bikes)) \\\n .filter(cls.station_id == station_id) \\\n .group_by(func.weekday(cls.last_update)) \\\n .all()\n\n # this section parses the query return into a readable list.\n # from docs:extend() appends the contents of seq to list.\n if station_data:\n station.extend([(days[a], float(b)) for a, b in station_data])\n else:\n station.extend([(0,0)])\n\n return station\n\n\nclass Weather(Base):\n \"\"\"holds data scraped from the open weather API.\"\"\"\n __tablename__ = \"weather\"\n id = Column(Integer, nullable=False, primary_key=True, autoincrement=True)\n coord_lon = Column(Float)\n coord_lat = Column(Float)\n weather_id = Column(Integer)\n weather_main = Column(String(45))\n weather_description = Column(String(45))\n weather_icon = Column(String(10))\n base = Column(String(45))\n main_temp = Column(Integer)\n main_pressure = Column(Integer)\n main_humidity = Column(Integer)\n main_temp_min = Column(Integer)\n main_temp_max = Column(Integer)\n visibility = Column(Integer)\n wind_speed = Column(Float)\n wind_deg = Column(Integer)\n 
clouds_all = Column(Integer)\n dt = Column(DateTime)\n sys_type = Column(Integer)\n sys_id = Column(Integer)\n sys_message = Column(Float)\n sys_country = Column(String(2))\n sys_sunrise = Column(DateTime)\n sys_sunset = Column(DateTime)\n city_id = Column(Integer)\n city_name = Column(String(6))\n cod = Column(Integer)\n\n @classmethod\n def findWetWeatherDays(self, dbsession, today):\n \"\"\"finds days where there was wet weather.\"\"\"\n wetDays = dbsession.query(self.dt).filter(or_(self.weather_description == \"light rain\", self.weather_description == \"moderate rain\")).all()\n # if one of those days is today return it.\n # else just return a wet day.\n for i in range(len(wetDays)):\n if today == wetDays[i][0].weekday():\n return wetDays[i][0]\n else:\n return wetDays[0][0]\n\n\n# path to DB\nconnection_string='mysql+mysqldb://{username}:{password}@{host}:3306/dublinbikesdata'.format(username=os.environ['DatabaseUser'],\n password=os.environ['DatabasePassword'],\n host=os.environ['DatabaseServer'])\nengine = create_engine(connection_string, poolclass=NullPool)\n\n# create the session using sqlalchemy.\ndb_session = scoped_session(sessionmaker(bind=engine, autocommit=False, autoflush=False))\n\n\nif __name__==\"__main__\":\n \"\"\"Below is used for testing if the database is working by running this file directly.\n not used in the actual app.\"\"\"\n station_id = 42\n\n static_info = db_session.query(Station.number,\n Station.name,\n Station.address,\n Station.position_lat,\n Station.position_long).all()\n dynamic_info = Station.get_current_station_info(db_session)\n static_fields = ['number', 'name', 'address', 'position_lat', 'position_long']\n dynamic_fields = ['last_update', 'available_bike_stands', 'available_bikes']\n\n json_data = [dict(zip(static_fields + dynamic_fields, static + dynamic))\n for static, dynamic in\n zip(static_info, dynamic_info)]\n print(json_data)\n",
"step-ids": [
9,
11,
12,
16,
21
]
}
|
[
9,
11,
12,
16,
21
] |
<|reserved_special_token_0|>
def genAcc():
    """Generate a random 10-digit account number starting with '30'."""
    # Fixed "30" bank prefix followed by 8 random digits.
    suffix = [random.randint(0, 9) for _ in range(8)]
    return '30' + ''.join(str(d) for d in suffix)
def transfer(tName, tNo, amount, tBankName):
    """Transfer *amount* from the logged-in user to another account.

    Bug fix: the original ADDED the amount to the sender's balance
    (copy-paste of the deposit logic); a transfer must debit it.
    Also rejects transfers that exceed the current balance.
    Returns False on insufficient funds or a failed persist.
    """
    balance = int(user[-1])
    if amount > balance:
        print('Insufficient funds')
        return False
    # Debit the sender (the original credited by mistake).
    user[-1] = balance - amount
    newval = str(user[-1])
    try:
        # persist the new balance to the account file
        file.update(user_acc_no, -1, newval)
    except FileNotFoundError:
        print('an issues occured due to network, try again later')
        return False
    print(
        """Tranfer successful! \\Account name {}
Account number : {}
Amount transferred : {}
Bank : {}"""
        .format(tName, tNo, amount, tBankName))
    print('Balance : ${}'.format(user[-1]))
    tym = datetime.datetime.now()
    print(tym)
<|reserved_special_token_0|>
def statement():
    """Print a one-line balance summary for the logged-in user."""
    first_name, balance = user[1], user[-1]
    print('hi {} your balance is ${}.'.format(first_name, balance))
def pinval(val):
    """Return True when *val* matches the user's stored transaction pin."""
    return val == user[-3]
def pinReset(val, val2):
    """Change the transaction pin when *val* and *val2* match.

    Fix: the new pin is persisted before the success message, so a
    failed write no longer reports "Pin change successful".
    Returns False when the persist fails.
    """
    if val == val2:
        user[-3] = val
        try:
            # persist first, announce success only afterwards
            file.update(user_acc_no, -3, user[-3])
        except FileNotFoundError:
            print('an issues occured due to network, try again later')
            return False
        print('Pin change successful')
    else:
        print('oops!! The two pin are not the same')
    tym = datetime.datetime.now()
    print(tym)
def passReset(val, val2):
    """Change the login password when *val* and *val2* match.

    Fix: the new password is persisted before the success message, so a
    failed write no longer claims the change succeeded.
    Returns False when the persist fails.
    """
    if val == val2:
        user[-2] = val
        try:
            # persist first, announce success only afterwards
            file.update(user_acc_no, -2, user[-2])
        except FileNotFoundError:
            print('an issues occured due to network, try again later')
            return False
        print('Password change successful')
    else:
        print('Passwords not Matched')
    tym = datetime.datetime.now()
    print(tym)
<|reserved_special_token_0|>
def operation(user):
    """Interactive account menu for the logged-in *user*.

    Prints the header and balance, reads a menu choice, performs the
    chosen action, then re-enters the menu recursively until the user
    logs out (8) or exits (0 / anything else).
    """
    print('==========================ZURI BANK===================')
    print('welcome {}'.format(user[1] + ' ' + user[0]))
    print('Balance : ${}'.format(user[-1]))
    print('Please input only 1,2,3,4,5,6, or 7')
    mainOpt = input(
        """select an option:
1. Transfer
2. Withdrawal
3. Deposit
4. Change Pin
5. Reset Password
6. Account Statment
7. Complaint
8. Logout
0. Exit
==>"""
        )
    if mainOpt == '1':
        print('Balance = ${}'.format(user[-1]))
        amount = int(input('Enter amount:'))
        tName = input('Enter account name:')
        tNo = input('Enter account Number:')
        tBankName = input('Enter Bank:')
        val = input('Enter PIN')
        if pinval(val) == True:
            if len(tNo) != 10:
                print(
                    'wrong account number, Note Account number must be 10 digit'
                    )
            else:
                transfer(tName, tNo, amount, tBankName)
                operation(user)
        else:
            print('wrong pin')
    elif mainOpt == '2':
        print('Balance = ${}'.format(user[-1]))
        amount = int(input('Enter Amount:'))
        # Fix: the pin is stored as a string (pinReset saves input() verbatim
        # and the transfer branch compares strings), so the original
        # int(input(...)) cast here could never match a string pin.
        # Also dropped a redundant pinval(val) call whose result was ignored.
        val = input('Enter transaction Pin:')
        if pinval(val) == True:
            withdraw(amount)
            operation(user)
        else:
            print('oop!! wrong pin')
    elif mainOpt == '3':
        print('Balance = ${}'.format(user[-1]))
        amount = int(input('Enter Amount:'))
        deposit(amount)
        operation(user)
    elif mainOpt == '4':
        val = input('Enter new pin:')
        val2 = input('Confirm new pin:')
        pinReset(val, val2)
        operation(user)
    elif mainOpt == '5':
        val = input('Enter new password:')
        val2 = input('Confirm new password:')
        passReset(val, val2)
        operation(user)
    elif mainOpt == '6':
        statement()
        operation(user)
    elif mainOpt == '7':
        comp = input('Enter complaint:')
        print(
            'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'
            .format(user[1], user[3]))
        operation(user)
    elif mainOpt == '8':
        # return to the login screen
        login()
    else:
        # '0' or any unrecognised choice ends the program
        print('Thank you for banking with us!!!')
        exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def genAcc():
    """Build a fresh 10-digit account number with the bank's '30' prefix."""
    digits = [3, 0]
    # append 8 random digits after the fixed prefix
    for _ in range(8):
        digits.append(random.randint(0, 9))
    return ''.join(map(str, digits))
def transfer(tName, tNo, amount, tBankName):
    """Transfer *amount* from the logged-in user to another account.

    Bug fix: the original ADDED the amount to the sender's balance
    (copy-paste of the deposit logic); a transfer must debit it.
    Also rejects transfers that exceed the current balance.
    Returns False on insufficient funds or a failed persist.
    """
    balance = int(user[-1])
    if amount > balance:
        print('Insufficient funds')
        return False
    # Debit the sender (the original credited by mistake).
    user[-1] = balance - amount
    newval = str(user[-1])
    try:
        # persist the new balance to the account file
        file.update(user_acc_no, -1, newval)
    except FileNotFoundError:
        print('an issues occured due to network, try again later')
        return False
    print(
        """Tranfer successful! \\Account name {}
Account number : {}
Amount transferred : {}
Bank : {}"""
        .format(tName, tNo, amount, tBankName))
    print('Balance : ${}'.format(user[-1]))
    tym = datetime.datetime.now()
    print(tym)
def deposit(amount):
    """Credit *amount* to the logged-in user's balance and persist it.

    Returns False (after printing a message) when the account file
    cannot be updated.
    """
    user[-1] = int(user[-1]) + amount
    try:
        file.update(user_acc_no, -1, str(user[-1]))
    except FileNotFoundError:
        print('an issues occured due to network, try again later')
        return False
    print('{} successful deposited'.format(amount))
    print('your balance is ${}'.format(user[-1]))
    print(datetime.datetime.now())
<|reserved_special_token_0|>
def statement():
print('hi {} your balance is ${}.'.format(user[1], user[-1]))
def pinval(val):
if val == user[-3]:
return True
else:
return False
def pinReset(val, val2):
if val == val2:
user[-3] = val
print('Pin change successful')
newval = user[-3]
try:
file.update(user_acc_no, -3, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('oops!! The two pin are not the same')
tym = datetime.datetime.now()
print(tym)
def passReset(val, val2):
if val == val2:
user[-2] = val
print('Password change successful')
newval = user[-2]
try:
file.update(user_acc_no, -2, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('Passwords not Matched')
tym = datetime.datetime.now()
print(tym)
<|reserved_special_token_0|>
def operation(user):
print('==========================ZURI BANK===================')
print('welcome {}'.format(user[1] + ' ' + user[0]))
print('Balance : ${}'.format(user[-1]))
print('Please input only 1,2,3,4,5,6, or 7')
mainOpt = input(
"""select an option:
1. Transfer
2. Withdrawal
3. Deposit
4. Change Pin
5. Reset Password
6. Account Statment
7. Complaint
8. Logout
0. Exit
==>"""
)
if mainOpt == '1':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter amount:'))
tName = input('Enter account name:')
tNo = input('Enter account Number:')
tBankName = input('Enter Bank:')
val = input('Enter PIN')
if pinval(val) == True:
if len(tNo) != 10:
print(
'wrong account number, Note Account number must be 10 digit'
)
else:
transfer(tName, tNo, amount, tBankName)
operation(user)
else:
print('wrong pin')
elif mainOpt == '2':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
val = int(input('Enter transaction Pin:'))
pinval(val)
if pinval(val) == True:
withdraw(amount)
operation(user)
else:
print('oop!! wrong pin')
elif mainOpt == '3':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
deposit(amount)
operation(user)
elif mainOpt == '4':
val = input('Enter new pin:')
val2 = input('Confirm new pin:')
pinReset(val, val2)
operation(user)
elif mainOpt == '5':
val = input('Enter new password:')
val2 = input('Confirm new password:')
passReset(val, val2)
operation(user)
elif mainOpt == '6':
statement()
operation(user)
elif mainOpt == '7':
comp = input('Enter complaint:')
print(
'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'
.format(user[1], user[3]))
operation(user)
elif mainOpt == '8':
login()
else:
print('Thank you for banking with us!!!')
exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register():
global first, last, email, pin, password, accountName
first = input('input firstname:')
last = input('input lastname:')
email = input('input email:')
pin = input('input a four digit pin:')
password = input('Input Password:')
accountName = '{} {}'.format(last, first)
def genAcc():
num = 1
y = [3, 0]
while num <= 8:
x = random.randint(0, 9)
y.append(x)
num = num + 1
accountNo = ''.join([str(i) for i in y])
return accountNo
def transfer(tName, tNo, amount, tBankName):
user[-1] = int(user[-1]) + amount
newval = user[-1]
newval = str(newval)
try:
file.update(user_acc_no, -1, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
print(
"""Tranfer successful! \\Account name {}
Account number : {}
Amount transferred : {}
Bank : {}"""
.format(tName, tNo, amount, tBankName))
print('Balance : ${}'.format(user[-1]))
tym = datetime.datetime.now()
print(tym)
def deposit(amount):
user[-1] = int(user[-1]) + amount
newval = user[-1]
newval = str(newval)
try:
file.update(user_acc_no, -1, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
print('{} successful deposited'.format(amount))
print('your balance is ${}'.format(user[-1]))
tym = datetime.datetime.now()
print(tym)
<|reserved_special_token_0|>
def statement():
print('hi {} your balance is ${}.'.format(user[1], user[-1]))
def pinval(val):
if val == user[-3]:
return True
else:
return False
def pinReset(val, val2):
if val == val2:
user[-3] = val
print('Pin change successful')
newval = user[-3]
try:
file.update(user_acc_no, -3, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('oops!! The two pin are not the same')
tym = datetime.datetime.now()
print(tym)
def passReset(val, val2):
if val == val2:
user[-2] = val
print('Password change successful')
newval = user[-2]
try:
file.update(user_acc_no, -2, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('Passwords not Matched')
tym = datetime.datetime.now()
print(tym)
<|reserved_special_token_0|>
def operation(user):
print('==========================ZURI BANK===================')
print('welcome {}'.format(user[1] + ' ' + user[0]))
print('Balance : ${}'.format(user[-1]))
print('Please input only 1,2,3,4,5,6, or 7')
mainOpt = input(
"""select an option:
1. Transfer
2. Withdrawal
3. Deposit
4. Change Pin
5. Reset Password
6. Account Statment
7. Complaint
8. Logout
0. Exit
==>"""
)
if mainOpt == '1':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter amount:'))
tName = input('Enter account name:')
tNo = input('Enter account Number:')
tBankName = input('Enter Bank:')
val = input('Enter PIN')
if pinval(val) == True:
if len(tNo) != 10:
print(
'wrong account number, Note Account number must be 10 digit'
)
else:
transfer(tName, tNo, amount, tBankName)
operation(user)
else:
print('wrong pin')
elif mainOpt == '2':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
val = int(input('Enter transaction Pin:'))
pinval(val)
if pinval(val) == True:
withdraw(amount)
operation(user)
else:
print('oop!! wrong pin')
elif mainOpt == '3':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
deposit(amount)
operation(user)
elif mainOpt == '4':
val = input('Enter new pin:')
val2 = input('Confirm new pin:')
pinReset(val, val2)
operation(user)
elif mainOpt == '5':
val = input('Enter new password:')
val2 = input('Confirm new password:')
passReset(val, val2)
operation(user)
elif mainOpt == '6':
statement()
operation(user)
elif mainOpt == '7':
comp = input('Enter complaint:')
print(
'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'
.format(user[1], user[3]))
operation(user)
elif mainOpt == '8':
login()
else:
print('Thank you for banking with us!!!')
exit()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register():
global first, last, email, pin, password, accountName
first = input('input firstname:')
last = input('input lastname:')
email = input('input email:')
pin = input('input a four digit pin:')
password = input('Input Password:')
accountName = '{} {}'.format(last, first)
def genAcc():
num = 1
y = [3, 0]
while num <= 8:
x = random.randint(0, 9)
y.append(x)
num = num + 1
accountNo = ''.join([str(i) for i in y])
return accountNo
def transfer(tName, tNo, amount, tBankName):
user[-1] = int(user[-1]) + amount
newval = user[-1]
newval = str(newval)
try:
file.update(user_acc_no, -1, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
print(
"""Tranfer successful! \\Account name {}
Account number : {}
Amount transferred : {}
Bank : {}"""
.format(tName, tNo, amount, tBankName))
print('Balance : ${}'.format(user[-1]))
tym = datetime.datetime.now()
print(tym)
def deposit(amount):
user[-1] = int(user[-1]) + amount
newval = user[-1]
newval = str(newval)
try:
file.update(user_acc_no, -1, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
print('{} successful deposited'.format(amount))
print('your balance is ${}'.format(user[-1]))
tym = datetime.datetime.now()
print(tym)
def withdraw(amount):
user[-1] = int(user[-1])
if user[-1] > amount:
user[-1] -= amount
print('successful')
print('your balance is ${}'.format(user[-1]))
else:
print('Sorry, not enough funds!')
newval = user[-1]
str(newval)
try:
file.update(user_acc_no, -1, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
tym = datetime.datetime.now()
print(tym)
def statement():
print('hi {} your balance is ${}.'.format(user[1], user[-1]))
def pinval(val):
if val == user[-3]:
return True
else:
return False
def pinReset(val, val2):
if val == val2:
user[-3] = val
print('Pin change successful')
newval = user[-3]
try:
file.update(user_acc_no, -3, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('oops!! The two pin are not the same')
tym = datetime.datetime.now()
print(tym)
def passReset(val, val2):
if val == val2:
user[-2] = val
print('Password change successful')
newval = user[-2]
try:
file.update(user_acc_no, -2, newval)
except FileNotFoundError:
print('an issues occured due to network, try again later')
return False
else:
print('Passwords not Matched')
tym = datetime.datetime.now()
print(tym)
def login():
global user_acc_no, user_password, user
print('===================LOGIN PAGE=================')
print('Enter your login details')
user_acc_no = int(input('Enter username:'))
user_password = getpass('Enter password:')
user = file.authentication(user_acc_no, user_password)
if user:
operation(user)
else:
print('invalid account and password')
login()
def welcome():
opt = input('Hello!, Welcome to Zuri Bank \n1. Register\n2.Login \n==>')
if opt == '1':
print('============================ZURI BANK========================')
print(
"""Welcome please carefully follow the prompt and register your details
Note please only input 1 or 2 """
)
register()
accountNo = ''
accountNo = genAcc()
is_user_created = file.create(accountNo, first, last, email, pin,
password)
if is_user_created:
try:
print(
"""Registration Successful!!!
your details are:
Account name is {}
Account number is {}"""
.format(accountName, accountNo))
login()
tym = datetime.datetime.now()
print(tym)
except FileExistsError:
print(
'sorry there was a issue in network connection, please try again'
)
register()
except ValueError:
print(
'sorry there was a issue in network connection, please try again'
)
register()
elif opt == '2':
login()
else:
print('Wrong input. Note: enter 1 or 2 to select')
def operation(user):
print('==========================ZURI BANK===================')
print('welcome {}'.format(user[1] + ' ' + user[0]))
print('Balance : ${}'.format(user[-1]))
print('Please input only 1,2,3,4,5,6, or 7')
mainOpt = input(
"""select an option:
1. Transfer
2. Withdrawal
3. Deposit
4. Change Pin
5. Reset Password
6. Account Statment
7. Complaint
8. Logout
0. Exit
==>"""
)
if mainOpt == '1':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter amount:'))
tName = input('Enter account name:')
tNo = input('Enter account Number:')
tBankName = input('Enter Bank:')
val = input('Enter PIN')
if pinval(val) == True:
if len(tNo) != 10:
print(
'wrong account number, Note Account number must be 10 digit'
)
else:
transfer(tName, tNo, amount, tBankName)
operation(user)
else:
print('wrong pin')
elif mainOpt == '2':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
val = int(input('Enter transaction Pin:'))
pinval(val)
if pinval(val) == True:
withdraw(amount)
operation(user)
else:
print('oop!! wrong pin')
elif mainOpt == '3':
print('Balance = ${}'.format(user[-1]))
amount = int(input('Enter Amount:'))
deposit(amount)
operation(user)
elif mainOpt == '4':
val = input('Enter new pin:')
val2 = input('Confirm new pin:')
pinReset(val, val2)
operation(user)
elif mainOpt == '5':
val = input('Enter new password:')
val2 = input('Confirm new password:')
passReset(val, val2)
operation(user)
elif mainOpt == '6':
statement()
operation(user)
elif mainOpt == '7':
comp = input('Enter complaint:')
print(
'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'
.format(user[1], user[3]))
operation(user)
elif mainOpt == '8':
login()
else:
print('Thank you for banking with us!!!')
exit()
welcome()
<|reserved_special_token_1|>
import random
import datetime
import userval
import file
from getpass import getpass
#SORRY FOR THE REDUNDANT CODE, I RAN OUT OF OPTIONS
def register():
global first,last,email,pin,password,accountName #prepared_user_details
first=input("input firstname:")
last=input("input lastname:")
email=input("input email:")
pin=input("input a four digit pin:")
password=input("Input Password:")
accountName = "{} {}".format(last,first)
#prepared_user_details= first + "," + last + "," + email + "," + str(pin) + "," + password + "," + str(0)
#---------------------Account number generator-------------------------
def genAcc():
num= 1
y=[3,0] #all account numbers generated must start with three zero to make it unique
while num <= 8:
x = random.randint(0,9)
y.append(x)
num = num +1
accountNo=''.join([str(i)for i in y])
return accountNo
#-----------------Transfer function---------------------
def transfer(tName, tNo, amount, tBankName):
user[-1]= int(user[-1]) + amount
newval=user[-1]
newval=str(newval)
try:
file.update(user_acc_no,-1,newval)
except FileNotFoundError:
print("an issues occured due to network, try again later")
return False
print("Tranfer successful! \Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}".format(tName, tNo, amount, tBankName))
print("Balance : ${}".format(user[-1]))
tym =datetime.datetime.now()
print(tym)
#-----------------deposit function-----------------------
def deposit(amount):
user[-1] = int(user[-1]) + amount
newval=user[-1]
newval=str(newval)
try:
file.update(user_acc_no,-1,newval)
except FileNotFoundError:
print("an issues occured due to network, try again later")
return False
print("{} successful deposited".format(amount))
print("your balance is ${}".format(user[-1]))
tym =datetime.datetime.now()
print(tym)
#------------------withdraw function---------------------------
def withdraw(amount):
user[-1]=int(user[-1])
if user[-1] > amount:
user[-1] -= amount
print("successful")
print("your balance is ${}".format(user[-1]))
else:
print("Sorry, not enough funds!")
newval = user[-1]
str(newval)
try:
file.update(user_acc_no,-1,newval)
except FileNotFoundError:
print("an issues occured due to network, try again later")
return False
tym =datetime.datetime.now()
print(tym)
#---------------------balance check function------------------------
def statement():
print("hi {} your balance is ${}.".format(user[1],user[-1]))
#---------------------pin validation function------------------------
def pinval(val):
if val == user[-3]:
return True
else:
return False
#---------------------pin reset function---------------------------
def pinReset(val,val2):
if val == val2:
user[-3] = val
print("Pin change successful")
newval = user[-3]
try:
file.update(user_acc_no,-3,newval)
except FileNotFoundError:
print("an issues occured due to network, try again later")
return False
else:
print("oops!! The two pin are not the same")
tym =datetime.datetime.now()
print(tym)
#-----------------password reset function-------------------------
def passReset(val, val2):
if val == val2:
user[-2]= val
print("Password change successful")
newval = user[-2]
try:
file.update(user_acc_no,-2,newval)
except FileNotFoundError:
print("an issues occured due to network, try again later")
return False
else:
print("Passwords not Matched")
tym =datetime.datetime.now()
print(tym)
#----------------------login function---------------------
def login():
global user_acc_no, user_password,user
print("===================LOGIN PAGE=================")
print("Enter your login details")
user_acc_no = int(input("Enter username:"))
user_password = getpass("Enter password:")
user= file.authentication(user_acc_no, user_password)
if user:
operation(user)
else:
print("invalid account and password")
login()
def welcome():
#---------------------------------main prompt---------------
opt= input("Hello!, Welcome to Zuri Bank \n1. Register\n2.Login \n==>")
#-----------------------------Registration Prompt--------------------------
if opt == '1':
print("============================ZURI BANK========================")
print("Welcome please carefully follow the prompt and register your details\n Note please only input 1 or 2 ")
register()
accountNo = ""
accountNo=genAcc()
is_user_created = file.create(accountNo,first,last,email,pin,password)
if is_user_created:
try:
print("Registration Successful!!!\n your details are:\n Account name is {} \n Account number is {}".format(accountName,accountNo))
login()
tym =datetime.datetime.now()
print(tym)
except FileExistsError:
print("sorry there was a issue in network connection, please try again")
register()
except ValueError:
print("sorry there was a issue in network connection, please try again")
register()
elif opt == '2':
login()
else:
print("Wrong input. Note: enter 1 or 2 to select")
def operation(user):
print("==========================ZURI BANK===================")
print("welcome {}".format(user[1] + ' ' + user[0]))
print("Balance : ${}".format(user[-1]))
print("Please input only 1,2,3,4,5,6, or 7")
mainOpt=input("select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>")
if mainOpt == '1':
print("Balance = ${}".format(user[-1]))
amount=int(input("Enter amount:"))
tName=input("Enter account name:")
tNo=input("Enter account Number:")
tBankName=input("Enter Bank:")
val=input("Enter PIN")
if (pinval(val) == True):
if len(tNo) != 10:
print("wrong account number, Note Account number must be 10 digit")
else:
transfer(tName,tNo,amount,tBankName)
operation(user)
else:
print("wrong pin")
elif mainOpt == '2':
print("Balance = ${}".format(user[-1]))
amount=int(input("Enter Amount:"))
val=int(input("Enter transaction Pin:"))
pinval(val)
if pinval(val) == True:
withdraw(amount)
operation(user)
else:
print("oop!! wrong pin")
elif mainOpt == '3':
print("Balance = ${}".format(user[-1]))
amount=int(input("Enter Amount:"))
deposit(amount)
operation(user)
elif mainOpt == '4':
val=input("Enter new pin:")
val2=input("Confirm new pin:")
pinReset(val,val2)
operation(user)
elif mainOpt == '5':
val=input("Enter new password:")
val2=input("Confirm new password:")
passReset(val,val2)
operation(user)
elif mainOpt == '6':
statement()
operation(user)
elif mainOpt == '7':
comp=input("Enter complaint:")
print("Thanks {} for reaching to us, we will get back to you shortly via your email:{}".format(user[1],user[3]))
operation(user)
elif mainOpt == '8':
login()
else:
print("Thank you for banking with us!!!")
exit()
welcome()
|
flexible
|
{
"blob_id": "a8106c8f14e15706b12e6d157b889288b85bc277",
"index": 6789,
"step-1": "<mask token>\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. 
Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! 
The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! 
wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef register():\n global first, last, email, pin, password, accountName\n first = input('input firstname:')\n last = input('input lastname:')\n email = input('input email:')\n pin = input('input a four digit pin:')\n password = input('Input Password:')\n accountName = '{} {}'.format(last, first)\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! 
The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\n<mask token>\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! 
wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef register():\n global first, last, email, pin, password, accountName\n first = input('input firstname:')\n last = input('input lastname:')\n email = input('input email:')\n pin = input('input a four digit pin:')\n password = input('Input Password:')\n accountName = '{} {}'.format(last, first)\n\n\ndef genAcc():\n num = 1\n y = [3, 0]\n while num <= 8:\n x = random.randint(0, 9)\n y.append(x)\n num = num + 1\n accountNo = ''.join([str(i) for i in y])\n return accountNo\n\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print(\n \"\"\"Tranfer successful! \\\\Account name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}\"\"\"\n .format(tName, tNo, amount, tBankName))\n print('Balance : ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval = user[-1]\n newval = str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n print('{} successful deposited'.format(amount))\n print('your balance is ${}'.format(user[-1]))\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef withdraw(amount):\n user[-1] = int(user[-1])\n if user[-1] > amount:\n user[-1] -= amount\n print('successful')\n print('your balance is ${}'.format(user[-1]))\n else:\n print('Sorry, not enough funds!')\n newval = user[-1]\n str(newval)\n try:\n file.update(user_acc_no, -1, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef statement():\n print('hi {} your balance is ${}.'.format(user[1], user[-1]))\n\n\ndef pinval(val):\n if val == 
user[-3]:\n return True\n else:\n return False\n\n\ndef pinReset(val, val2):\n if val == val2:\n user[-3] = val\n print('Pin change successful')\n newval = user[-3]\n try:\n file.update(user_acc_no, -3, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('oops!! The two pin are not the same')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef passReset(val, val2):\n if val == val2:\n user[-2] = val\n print('Password change successful')\n newval = user[-2]\n try:\n file.update(user_acc_no, -2, newval)\n except FileNotFoundError:\n print('an issues occured due to network, try again later')\n return False\n else:\n print('Passwords not Matched')\n tym = datetime.datetime.now()\n print(tym)\n\n\ndef login():\n global user_acc_no, user_password, user\n print('===================LOGIN PAGE=================')\n print('Enter your login details')\n user_acc_no = int(input('Enter username:'))\n user_password = getpass('Enter password:')\n user = file.authentication(user_acc_no, user_password)\n if user:\n operation(user)\n else:\n print('invalid account and password')\n login()\n\n\ndef welcome():\n opt = input('Hello!, Welcome to Zuri Bank \\n1. 
Register\\n2.Login \\n==>')\n if opt == '1':\n print('============================ZURI BANK========================')\n print(\n \"\"\"Welcome please carefully follow the prompt and register your details\n Note please only input 1 or 2 \"\"\"\n )\n register()\n accountNo = ''\n accountNo = genAcc()\n is_user_created = file.create(accountNo, first, last, email, pin,\n password)\n if is_user_created:\n try:\n print(\n \"\"\"Registration Successful!!!\n your details are:\n Account name is {} \n Account number is {}\"\"\"\n .format(accountName, accountNo))\n login()\n tym = datetime.datetime.now()\n print(tym)\n except FileExistsError:\n print(\n 'sorry there was a issue in network connection, please try again'\n )\n register()\n except ValueError:\n print(\n 'sorry there was a issue in network connection, please try again'\n )\n register()\n elif opt == '2':\n login()\n else:\n print('Wrong input. Note: enter 1 or 2 to select')\n\n\ndef operation(user):\n print('==========================ZURI BANK===================')\n print('welcome {}'.format(user[1] + ' ' + user[0]))\n print('Balance : ${}'.format(user[-1]))\n print('Please input only 1,2,3,4,5,6, or 7')\n mainOpt = input(\n \"\"\"select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. 
Exit \n==>\"\"\"\n )\n if mainOpt == '1':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter amount:'))\n tName = input('Enter account name:')\n tNo = input('Enter account Number:')\n tBankName = input('Enter Bank:')\n val = input('Enter PIN')\n if pinval(val) == True:\n if len(tNo) != 10:\n print(\n 'wrong account number, Note Account number must be 10 digit'\n )\n else:\n transfer(tName, tNo, amount, tBankName)\n operation(user)\n else:\n print('wrong pin')\n elif mainOpt == '2':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n val = int(input('Enter transaction Pin:'))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print('oop!! wrong pin')\n elif mainOpt == '3':\n print('Balance = ${}'.format(user[-1]))\n amount = int(input('Enter Amount:'))\n deposit(amount)\n operation(user)\n elif mainOpt == '4':\n val = input('Enter new pin:')\n val2 = input('Confirm new pin:')\n pinReset(val, val2)\n operation(user)\n elif mainOpt == '5':\n val = input('Enter new password:')\n val2 = input('Confirm new password:')\n passReset(val, val2)\n operation(user)\n elif mainOpt == '6':\n statement()\n operation(user)\n elif mainOpt == '7':\n comp = input('Enter complaint:')\n print(\n 'Thanks {} for reaching to us, we will get back to you shortly via your email:{}'\n .format(user[1], user[3]))\n operation(user)\n elif mainOpt == '8':\n login()\n else:\n print('Thank you for banking with us!!!')\n exit()\n\n\nwelcome()\n",
"step-5": "import random\nimport datetime \nimport userval\nimport file\nfrom getpass import getpass\n#SORRY FOR THE REDUNDANT CODE, I RAN OUT OF OPTIONS\n\n\ndef register():\n global first,last,email,pin,password,accountName #prepared_user_details\n first=input(\"input firstname:\")\n last=input(\"input lastname:\")\n email=input(\"input email:\")\n pin=input(\"input a four digit pin:\")\n password=input(\"Input Password:\")\n accountName = \"{} {}\".format(last,first)\n #prepared_user_details= first + \",\" + last + \",\" + email + \",\" + str(pin) + \",\" + password + \",\" + str(0)\n \n #---------------------Account number generator-------------------------\n\ndef genAcc():\n num= 1\n y=[3,0] #all account numbers generated must start with three zero to make it unique\n while num <= 8:\n x = random.randint(0,9)\n y.append(x)\n num = num +1\n accountNo=''.join([str(i)for i in y])\n return accountNo\n \n #-----------------Transfer function---------------------\n\ndef transfer(tName, tNo, amount, tBankName):\n user[-1]= int(user[-1]) + amount\n newval=user[-1]\n newval=str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n print(\"Tranfer successful! 
\\Account name {} \\nAccount number : {} \\nAmount transferred : {} \\nBank : {}\".format(tName, tNo, amount, tBankName))\n print(\"Balance : ${}\".format(user[-1]))\n tym =datetime.datetime.now()\n print(tym)\n \n #-----------------deposit function-----------------------\n\ndef deposit(amount):\n user[-1] = int(user[-1]) + amount\n newval=user[-1]\n newval=str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n print(\"{} successful deposited\".format(amount))\n print(\"your balance is ${}\".format(user[-1]))\n tym =datetime.datetime.now()\n print(tym)\n #------------------withdraw function---------------------------\n\ndef withdraw(amount):\n user[-1]=int(user[-1])\n if user[-1] > amount:\n user[-1] -= amount\n print(\"successful\")\n print(\"your balance is ${}\".format(user[-1]))\n else:\n print(\"Sorry, not enough funds!\")\n newval = user[-1]\n str(newval)\n try:\n file.update(user_acc_no,-1,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n tym =datetime.datetime.now()\n print(tym)\n \n \n #---------------------balance check function------------------------\n\n\ndef statement():\n print(\"hi {} your balance is ${}.\".format(user[1],user[-1]))\n \n \n #---------------------pin validation function------------------------\n\n\ndef pinval(val):\n if val == user[-3]:\n return True\n else:\n return False\n \n \n #---------------------pin reset function---------------------------\ndef pinReset(val,val2):\n if val == val2:\n user[-3] = val\n print(\"Pin change successful\")\n newval = user[-3]\n try:\n file.update(user_acc_no,-3,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n else:\n print(\"oops!! 
The two pin are not the same\")\n tym =datetime.datetime.now()\n print(tym)\n \n \n #-----------------password reset function------------------------- \ndef passReset(val, val2):\n if val == val2:\n user[-2]= val\n print(\"Password change successful\")\n newval = user[-2]\n try:\n file.update(user_acc_no,-2,newval)\n except FileNotFoundError:\n print(\"an issues occured due to network, try again later\")\n return False\n else:\n print(\"Passwords not Matched\")\n tym =datetime.datetime.now()\n print(tym)\n \n \n #----------------------login function---------------------\n\ndef login():\n global user_acc_no, user_password,user\n print(\"===================LOGIN PAGE=================\") \n print(\"Enter your login details\")\n user_acc_no = int(input(\"Enter username:\"))\n user_password = getpass(\"Enter password:\")\n\n user= file.authentication(user_acc_no, user_password)\n \n if user:\n operation(user)\n else:\n print(\"invalid account and password\")\n login()\n \n \n\n\n\ndef welcome(): \n #---------------------------------main prompt---------------\n opt= input(\"Hello!, Welcome to Zuri Bank \\n1. 
Register\\n2.Login \\n==>\")\n #-----------------------------Registration Prompt--------------------------\n if opt == '1':\n print(\"============================ZURI BANK========================\")\n print(\"Welcome please carefully follow the prompt and register your details\\n Note please only input 1 or 2 \")\n \n register()\n accountNo = \"\"\n accountNo=genAcc()\n is_user_created = file.create(accountNo,first,last,email,pin,password)\n if is_user_created:\n try:\n print(\"Registration Successful!!!\\n your details are:\\n Account name is {} \\n Account number is {}\".format(accountName,accountNo))\n login()\n \n tym =datetime.datetime.now()\n print(tym)\n except FileExistsError:\n print(\"sorry there was a issue in network connection, please try again\")\n register()\n\n except ValueError:\n print(\"sorry there was a issue in network connection, please try again\")\n register()\n \n\n\n elif opt == '2':\n \n login()\n \n\n\n\n else:\n print(\"Wrong input. Note: enter 1 or 2 to select\")\n\n \ndef operation(user): \n \n print(\"==========================ZURI BANK===================\")\n print(\"welcome {}\".format(user[1] + ' ' + user[0]))\n print(\"Balance : ${}\".format(user[-1]))\n print(\"Please input only 1,2,3,4,5,6, or 7\")\n mainOpt=input(\"select an option: \\n1. Transfer \\n2. Withdrawal \\n3. Deposit \\n4. Change Pin \\n5. Reset Password \\n6. Account Statment\\n7. Complaint\\n8. Logout\\n0. 
Exit \\n==>\")\n \n \n if mainOpt == '1':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter amount:\"))\n tName=input(\"Enter account name:\")\n tNo=input(\"Enter account Number:\")\n tBankName=input(\"Enter Bank:\")\n val=input(\"Enter PIN\")\n if (pinval(val) == True):\n if len(tNo) != 10:\n print(\"wrong account number, Note Account number must be 10 digit\")\n else:\n transfer(tName,tNo,amount,tBankName)\n operation(user)\n else:\n print(\"wrong pin\")\n \n elif mainOpt == '2':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter Amount:\"))\n val=int(input(\"Enter transaction Pin:\"))\n pinval(val)\n if pinval(val) == True:\n withdraw(amount)\n operation(user)\n else:\n print(\"oop!! wrong pin\")\n \n elif mainOpt == '3':\n print(\"Balance = ${}\".format(user[-1]))\n amount=int(input(\"Enter Amount:\"))\n deposit(amount)\n operation(user)\n \n \n elif mainOpt == '4':\n val=input(\"Enter new pin:\")\n val2=input(\"Confirm new pin:\")\n pinReset(val,val2)\n operation(user)\n \n elif mainOpt == '5':\n val=input(\"Enter new password:\")\n val2=input(\"Confirm new password:\")\n passReset(val,val2)\n operation(user)\n \n elif mainOpt == '6':\n statement()\n operation(user)\n \n elif mainOpt == '7':\n comp=input(\"Enter complaint:\")\n print(\"Thanks {} for reaching to us, we will get back to you shortly via your email:{}\".format(user[1],user[3]))\n operation(user)\n \n elif mainOpt == '8':\n login()\n \n else:\n print(\"Thank you for banking with us!!!\")\n exit()\n \n\n\nwelcome()",
"step-ids": [
7,
8,
9,
13,
15
]
}
|
[
7,
8,
9,
13,
15
] |
<|reserved_special_token_0|>
class nearest(svm):
<|reserved_special_token_0|>
def __init__(self):
svm.__init__(self)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class nearest(svm):
<|reserved_special_token_0|>
def __init__(self):
svm.__init__(self)
def fit(self, x, y):
self.x = x
self.y = y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class nearest(svm):
name = 'MLLKM2'
def __init__(self):
svm.__init__(self)
def fit(self, x, y):
self.x = x
self.y = y
def predict(self, x):
diff = np.subtract(x, self.x)
distance = np.linalg.norm(diff, axis=1)
dmin = np.argmin(distance)
return self.y[dmin]
<|reserved_special_token_1|>
import numpy as np
class nearest(svm):
name = 'MLLKM2'
def __init__(self):
svm.__init__(self)
def fit(self, x, y):
self.x = x
self.y = y
def predict(self, x):
diff = np.subtract(x, self.x)
distance = np.linalg.norm(diff, axis=1)
dmin = np.argmin(distance)
return self.y[dmin]
<|reserved_special_token_1|>
import numpy as np
class nearest(svm):
name="MLLKM2"
def __init__(self):
svm.__init__(self)
def fit(self,x,y):
self.x=x
self.y=y
def predict(self,x):
diff=np.subtract(x,self.x)
distance=np.linalg.norm(diff,axis=1)
dmin= np.argmin( distance )
return self.y[dmin]
|
flexible
|
{
"blob_id": "7d1ca15129b1bf6b713e1d5eda4436d4a8539ad1",
"index": 5939,
"step-1": "<mask token>\n\n\nclass nearest(svm):\n <mask token>\n\n def __init__(self):\n svm.__init__(self)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass nearest(svm):\n <mask token>\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass nearest(svm):\n name = 'MLLKM2'\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n\n def predict(self, x):\n diff = np.subtract(x, self.x)\n distance = np.linalg.norm(diff, axis=1)\n dmin = np.argmin(distance)\n return self.y[dmin]\n",
"step-4": "import numpy as np\n\n\nclass nearest(svm):\n name = 'MLLKM2'\n\n def __init__(self):\n svm.__init__(self)\n\n def fit(self, x, y):\n self.x = x\n self.y = y\n\n def predict(self, x):\n diff = np.subtract(x, self.x)\n distance = np.linalg.norm(diff, axis=1)\n dmin = np.argmin(distance)\n return self.y[dmin]\n",
"step-5": "import numpy as np\n\nclass nearest(svm):\n name=\"MLLKM2\"\n def __init__(self):\n svm.__init__(self)\n\n def fit(self,x,y):\n self.x=x\n self.y=y\n \n def predict(self,x):\n diff=np.subtract(x,self.x)\n distance=np.linalg.norm(diff,axis=1)\n dmin= np.argmin( distance )\n return self.y[dmin]",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
# Generated by Django 2.2.1 on 2019-05-05 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='divida',
name='id_cliente',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='divida',
name='motivo',
field=models.CharField(max_length=100),
),
]
|
normal
|
{
"blob_id": "1ce7b292f89fdf3f978c75d4cdf65b6991f71d6f",
"index": 7499,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='divida', name=\n 'id_cliente', field=models.CharField(max_length=10)), migrations.\n AlterField(model_name='divida', name='motivo', field=models.\n CharField(max_length=100))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='divida', name=\n 'id_cliente', field=models.CharField(max_length=10)), migrations.\n AlterField(model_name='divida', name='motivo', field=models.\n CharField(max_length=100))]\n",
"step-5": "# Generated by Django 2.2.1 on 2019-05-05 18:41\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='divida',\n name='id_cliente',\n field=models.CharField(max_length=10),\n ),\n migrations.AlterField(\n model_name='divida',\n name='motivo',\n field=models.CharField(max_length=100),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def init_setup():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.MODEL.DEVICE = 'cpu'
return cfg
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in app.config[
'ALLOWED_EXTENSIONS']
def detect_object(filename):
PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
im = cv2.imread(TEST_IMAGE_PATH)
cfg = app.config['detectron2_cfg']
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
pred_inst = outputs['instances'].to('cpu')
show_inst = []
pred_res = []
for tc in app.config['THING_CLASSES']:
if tc not in data_set.thing_classes:
print('Thing Class:' + tc + ', Not found in the training set')
continue
t_idx = data_set.thing_classes.index(tc)
filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
cat_cnt = len(filt_inst)
if cat_cnt > 0:
show_inst.append(filt_inst)
pred_res.append({'t_class': tc, 't_count': cat_cnt})
if len(show_inst) > 0:
pred_inst = Instances.cat(show_inst)
response = app.response_class(response=json.dumps({'result': pred_res}),
status=200, mimetype='application/json')
return response
@app.route('/infer', methods=['POST'])
def infer():
file = request.files['fimg']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return detect_object(filename=filename)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup_logger()
<|reserved_special_token_0|>
def init_setup():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.MODEL.DEVICE = 'cpu'
return cfg
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in app.config[
'ALLOWED_EXTENSIONS']
def detect_object(filename):
PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
im = cv2.imread(TEST_IMAGE_PATH)
cfg = app.config['detectron2_cfg']
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
pred_inst = outputs['instances'].to('cpu')
show_inst = []
pred_res = []
for tc in app.config['THING_CLASSES']:
if tc not in data_set.thing_classes:
print('Thing Class:' + tc + ', Not found in the training set')
continue
t_idx = data_set.thing_classes.index(tc)
filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
cat_cnt = len(filt_inst)
if cat_cnt > 0:
show_inst.append(filt_inst)
pred_res.append({'t_class': tc, 't_count': cat_cnt})
if len(show_inst) > 0:
pred_inst = Instances.cat(show_inst)
response = app.response_class(response=json.dumps({'result': pred_res}),
status=200, mimetype='application/json')
return response
@app.route('/infer', methods=['POST'])
def infer():
file = request.files['fimg']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return detect_object(filename=filename)
if __name__ == '__main__':
app.config['UPLOAD_FOLDER'] = '/app/imgstore/'
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])
app.config['detectron2_cfg'] = init_setup()
app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',
'bottle']
app.run(debug=False, host='0.0.0.0')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup_logger()
<|reserved_special_token_0|>
app = Flask(__name__)
def init_setup():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.MODEL.DEVICE = 'cpu'
return cfg
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in app.config[
'ALLOWED_EXTENSIONS']
def detect_object(filename):
PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
im = cv2.imread(TEST_IMAGE_PATH)
cfg = app.config['detectron2_cfg']
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
pred_inst = outputs['instances'].to('cpu')
show_inst = []
pred_res = []
for tc in app.config['THING_CLASSES']:
if tc not in data_set.thing_classes:
print('Thing Class:' + tc + ', Not found in the training set')
continue
t_idx = data_set.thing_classes.index(tc)
filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
cat_cnt = len(filt_inst)
if cat_cnt > 0:
show_inst.append(filt_inst)
pred_res.append({'t_class': tc, 't_count': cat_cnt})
if len(show_inst) > 0:
pred_inst = Instances.cat(show_inst)
response = app.response_class(response=json.dumps({'result': pred_res}),
status=200, mimetype='application/json')
return response
@app.route('/infer', methods=['POST'])
def infer():
file = request.files['fimg']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return detect_object(filename=filename)
if __name__ == '__main__':
app.config['UPLOAD_FOLDER'] = '/app/imgstore/'
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])
app.config['detectron2_cfg'] = init_setup()
app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',
'bottle']
app.run(debug=False, host='0.0.0.0')
<|reserved_special_token_1|>
from flask import Flask, redirect, url_for, request
from werkzeug.utils import secure_filename
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
import numpy as np
import os, json, cv2, random
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import Instances
import os
import sys
app = Flask(__name__)
def init_setup():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.MODEL.DEVICE = 'cpu'
return cfg
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in app.config[
'ALLOWED_EXTENSIONS']
def detect_object(filename):
PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
im = cv2.imread(TEST_IMAGE_PATH)
cfg = app.config['detectron2_cfg']
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
pred_inst = outputs['instances'].to('cpu')
show_inst = []
pred_res = []
for tc in app.config['THING_CLASSES']:
if tc not in data_set.thing_classes:
print('Thing Class:' + tc + ', Not found in the training set')
continue
t_idx = data_set.thing_classes.index(tc)
filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
cat_cnt = len(filt_inst)
if cat_cnt > 0:
show_inst.append(filt_inst)
pred_res.append({'t_class': tc, 't_count': cat_cnt})
if len(show_inst) > 0:
pred_inst = Instances.cat(show_inst)
response = app.response_class(response=json.dumps({'result': pred_res}),
status=200, mimetype='application/json')
return response
@app.route('/infer', methods=['POST'])
def infer():
file = request.files['fimg']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return detect_object(filename=filename)
if __name__ == '__main__':
app.config['UPLOAD_FOLDER'] = '/app/imgstore/'
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])
app.config['detectron2_cfg'] = init_setup()
app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',
'bottle']
app.run(debug=False, host='0.0.0.0')
<|reserved_special_token_1|>
#coding: utf-8
from flask import Flask, redirect, url_for, request
from werkzeug.utils import secure_filename
import torch, torchvision
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import Instances
import os
import sys
app = Flask(__name__)
def init_setup():
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE='cpu'
return cfg
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def detect_object(filename):
PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']
TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)
PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)
im = cv2.imread(TEST_IMAGE_PATH)
cfg = app.config['detectron2_cfg']
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
# filterout bana and orage
data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
# print(data_set.thing_classes)
pred_inst = outputs["instances"].to("cpu")
show_inst = []
pred_res = []
for tc in app.config['THING_CLASSES']:
if tc not in data_set.thing_classes:
print("Thing Class:"+ tc +", Not found in the training set")
continue
t_idx = data_set.thing_classes.index(tc)
filt_inst = pred_inst[pred_inst.pred_classes == t_idx]
cat_cnt = len(filt_inst)
if cat_cnt > 0:
show_inst.append(filt_inst)
pred_res.append({"t_class": tc, "t_count":cat_cnt})
if len(show_inst) > 0:
pred_inst = Instances.cat(show_inst)
# Comment this out later
# v = Visualizer(im[:, :, ::-1],data_set , scale=0.3)
# out = v.draw_instance_predictions(pred_inst)
# cv2.imwrite(PRED_IMAGE_PATH, out.get_image()[:, :, ::-1])
response = app.response_class(
response=json.dumps({'result': pred_res}),
status=200,
mimetype='application/json'
)
return response
@app.route("/infer", methods=['POST'])
def infer():
    """POST endpoint: accept an image upload under 'fimg', return counts.

    Saves the upload into UPLOAD_FOLDER and delegates to detect_object().
    Previously a missing or disallowed file fell through and returned
    None, which Flask turns into a 500; now it answers an explicit 400.
    """
    file = request.files.get('fimg')
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        return detect_object(filename=filename)
    # Bad request: no 'fimg' field, empty filename, or disallowed extension.
    return app.response_class(
        response=json.dumps({'error': 'expected an image file under "fimg"'}),
        status=400,
        mimetype='application/json'
    )
if __name__ == '__main__':
    # Application configuration: upload location, accepted image types,
    # the detectron2 inference config, and the object classes to count.
    app.config['UPLOAD_FOLDER'] = '/app/imgstore/'
    app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])
    app.config['detectron2_cfg'] = init_setup()
    app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple', 'bottle']
    # Bind on all interfaces (containerized deployment); debug disabled.
    app.run(debug=False,host='0.0.0.0')
|
flexible
|
{
"blob_id": "a18e98db417fe234e3d8d5d1321203fbac18751c",
"index": 8174,
"step-1": "<mask token>\n\n\ndef init_setup():\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n cfg.MODEL.DEVICE = 'cpu'\n return cfg\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config[\n 'ALLOWED_EXTENSIONS']\n\n\ndef detect_object(filename):\n PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)\n im = cv2.imread(TEST_IMAGE_PATH)\n cfg = app.config['detectron2_cfg']\n predictor = DefaultPredictor(cfg)\n outputs = predictor(im)\n data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n pred_inst = outputs['instances'].to('cpu')\n show_inst = []\n pred_res = []\n for tc in app.config['THING_CLASSES']:\n if tc not in data_set.thing_classes:\n print('Thing Class:' + tc + ', Not found in the training set')\n continue\n t_idx = data_set.thing_classes.index(tc)\n filt_inst = pred_inst[pred_inst.pred_classes == t_idx]\n cat_cnt = len(filt_inst)\n if cat_cnt > 0:\n show_inst.append(filt_inst)\n pred_res.append({'t_class': tc, 't_count': cat_cnt})\n if len(show_inst) > 0:\n pred_inst = Instances.cat(show_inst)\n response = app.response_class(response=json.dumps({'result': pred_res}),\n status=200, mimetype='application/json')\n return response\n\n\n@app.route('/infer', methods=['POST'])\ndef infer():\n file = request.files['fimg']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return detect_object(filename=filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsetup_logger()\n<mask token>\n\n\ndef init_setup():\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n cfg.MODEL.DEVICE = 'cpu'\n return cfg\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config[\n 'ALLOWED_EXTENSIONS']\n\n\ndef detect_object(filename):\n PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)\n im = cv2.imread(TEST_IMAGE_PATH)\n cfg = app.config['detectron2_cfg']\n predictor = DefaultPredictor(cfg)\n outputs = predictor(im)\n data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n pred_inst = outputs['instances'].to('cpu')\n show_inst = []\n pred_res = []\n for tc in app.config['THING_CLASSES']:\n if tc not in data_set.thing_classes:\n print('Thing Class:' + tc + ', Not found in the training set')\n continue\n t_idx = data_set.thing_classes.index(tc)\n filt_inst = pred_inst[pred_inst.pred_classes == t_idx]\n cat_cnt = len(filt_inst)\n if cat_cnt > 0:\n show_inst.append(filt_inst)\n pred_res.append({'t_class': tc, 't_count': cat_cnt})\n if len(show_inst) > 0:\n pred_inst = Instances.cat(show_inst)\n response = app.response_class(response=json.dumps({'result': pred_res}),\n status=200, mimetype='application/json')\n return response\n\n\n@app.route('/infer', methods=['POST'])\ndef infer():\n file = request.files['fimg']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return detect_object(filename=filename)\n\n\nif __name__ == '__main__':\n app.config['UPLOAD_FOLDER'] = '/app/imgstore/'\n 
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])\n app.config['detectron2_cfg'] = init_setup()\n app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',\n 'bottle']\n app.run(debug=False, host='0.0.0.0')\n",
"step-3": "<mask token>\nsetup_logger()\n<mask token>\napp = Flask(__name__)\n\n\ndef init_setup():\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n cfg.MODEL.DEVICE = 'cpu'\n return cfg\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config[\n 'ALLOWED_EXTENSIONS']\n\n\ndef detect_object(filename):\n PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)\n im = cv2.imread(TEST_IMAGE_PATH)\n cfg = app.config['detectron2_cfg']\n predictor = DefaultPredictor(cfg)\n outputs = predictor(im)\n data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n pred_inst = outputs['instances'].to('cpu')\n show_inst = []\n pred_res = []\n for tc in app.config['THING_CLASSES']:\n if tc not in data_set.thing_classes:\n print('Thing Class:' + tc + ', Not found in the training set')\n continue\n t_idx = data_set.thing_classes.index(tc)\n filt_inst = pred_inst[pred_inst.pred_classes == t_idx]\n cat_cnt = len(filt_inst)\n if cat_cnt > 0:\n show_inst.append(filt_inst)\n pred_res.append({'t_class': tc, 't_count': cat_cnt})\n if len(show_inst) > 0:\n pred_inst = Instances.cat(show_inst)\n response = app.response_class(response=json.dumps({'result': pred_res}),\n status=200, mimetype='application/json')\n return response\n\n\n@app.route('/infer', methods=['POST'])\ndef infer():\n file = request.files['fimg']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return detect_object(filename=filename)\n\n\nif __name__ == '__main__':\n app.config['UPLOAD_FOLDER'] = 
'/app/imgstore/'\n app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])\n app.config['detectron2_cfg'] = init_setup()\n app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',\n 'bottle']\n app.run(debug=False, host='0.0.0.0')\n",
"step-4": "from flask import Flask, redirect, url_for, request\nfrom werkzeug.utils import secure_filename\nimport torch, torchvision\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\nimport numpy as np\nimport os, json, cv2, random\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom detectron2.structures import Instances\nimport os\nimport sys\napp = Flask(__name__)\n\n\ndef init_setup():\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\n 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n cfg.MODEL.DEVICE = 'cpu'\n return cfg\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in app.config[\n 'ALLOWED_EXTENSIONS']\n\n\ndef detect_object(filename):\n PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)\n im = cv2.imread(TEST_IMAGE_PATH)\n cfg = app.config['detectron2_cfg']\n predictor = DefaultPredictor(cfg)\n outputs = predictor(im)\n data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n pred_inst = outputs['instances'].to('cpu')\n show_inst = []\n pred_res = []\n for tc in app.config['THING_CLASSES']:\n if tc not in data_set.thing_classes:\n print('Thing Class:' + tc + ', Not found in the training set')\n continue\n t_idx = data_set.thing_classes.index(tc)\n filt_inst = pred_inst[pred_inst.pred_classes == t_idx]\n cat_cnt = len(filt_inst)\n if cat_cnt > 0:\n show_inst.append(filt_inst)\n pred_res.append({'t_class': tc, 't_count': cat_cnt})\n if len(show_inst) > 0:\n pred_inst = 
Instances.cat(show_inst)\n response = app.response_class(response=json.dumps({'result': pred_res}),\n status=200, mimetype='application/json')\n return response\n\n\n@app.route('/infer', methods=['POST'])\ndef infer():\n file = request.files['fimg']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return detect_object(filename=filename)\n\n\nif __name__ == '__main__':\n app.config['UPLOAD_FOLDER'] = '/app/imgstore/'\n app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])\n app.config['detectron2_cfg'] = init_setup()\n app.config['THING_CLASSES'] = ['banana', 'orange', 'carrot', 'apple',\n 'bottle']\n app.run(debug=False, host='0.0.0.0')\n",
"step-5": "#coding: utf-8\n\nfrom flask import Flask, redirect, url_for, request\nfrom werkzeug.utils import secure_filename\n\nimport torch, torchvision\n\n# Setup detectron2 logger\nimport detectron2\nfrom detectron2.utils.logger import setup_logger\nsetup_logger()\n\n# import some common libraries\nimport numpy as np\nimport os, json, cv2, random\n\n# import some common detectron2 utilities\nfrom detectron2 import model_zoo\nfrom detectron2.engine import DefaultPredictor\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import Visualizer\nfrom detectron2.data import MetadataCatalog, DatasetCatalog\nfrom detectron2.structures import Instances\n\nimport os\nimport sys\n\napp = Flask(__name__)\n\ndef init_setup():\n cfg = get_cfg()\n # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\n cfg.MODEL.DEVICE='cpu'\n return cfg\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']\n\ndef detect_object(filename):\n PATH_TO_TEST_IMAGES_DIR = app.config['UPLOAD_FOLDER']\n TEST_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, filename)\n PRED_IMAGE_PATH = os.path.join(PATH_TO_TEST_IMAGES_DIR, 'pred_' + filename)\n \n im = cv2.imread(TEST_IMAGE_PATH)\n cfg = app.config['detectron2_cfg']\n\n predictor = DefaultPredictor(cfg)\n outputs = predictor(im)\n\n # filterout bana and orage\n data_set = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])\n\n # print(data_set.thing_classes)\n pred_inst = outputs[\"instances\"].to(\"cpu\")\n\n show_inst = []\n pred_res = []\n for tc in app.config['THING_CLASSES']:\n if tc not in data_set.thing_classes:\n print(\"Thing Class:\"+ tc +\", Not found in the training set\")\n continue\n t_idx = data_set.thing_classes.index(tc)\n filt_inst = pred_inst[pred_inst.pred_classes == t_idx]\n cat_cnt = len(filt_inst)\n if cat_cnt > 0:\n show_inst.append(filt_inst)\n pred_res.append({\"t_class\": tc, \"t_count\":cat_cnt})\n\n if len(show_inst) > 0:\n pred_inst = Instances.cat(show_inst)\n\n # Comment this out later\n # v = Visualizer(im[:, :, ::-1],data_set , scale=0.3)\n # out = v.draw_instance_predictions(pred_inst)\n # cv2.imwrite(PRED_IMAGE_PATH, out.get_image()[:, :, ::-1])\n \n response = app.response_class(\n response=json.dumps({'result': pred_res}),\n status=200,\n mimetype='application/json'\n )\n\n return response\n\n\n@app.route(\"/infer\", methods=['POST'])\ndef infer():\n file = request.files['fimg']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return detect_object(filename=filename)\n\nif __name__ == '__main__':\n app.config['UPLOAD_FOLDER'] = '/app/imgstore/'\n app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg'])\n app.config['detectron2_cfg'] = init_setup()\n app.config['THING_CLASSES'] = ['banana', 'orange', 
'carrot', 'apple', 'bottle']\n app.run(debug=False,host='0.0.0.0')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""Module providing views for asset storage folder"""
from Products.Five.browser import BrowserView
from plone import api
from plone.app.contenttypes.interfaces import IImage
class AssetRepositoryView(BrowserView):
    """Default view for a folderish asset-storage page."""

    def contained_items(self, uid):
        """Return the @@folderListing results of the container with *uid*."""
        container = api.content.get(UID=uid)
        listing = container.restrictedTraverse('@@folderListing')
        return listing()

    def item_index(self, uid):
        """Return how many items the container identified by *uid* holds."""
        return len(self.contained_items(uid))

    def preview_image(self, uid):
        """Return the container's first item if it is an image, else None."""
        listing = self.contained_items(uid)
        if not len(listing):
            return None
        candidate = listing[0].getObject()
        return candidate if IImage.providedBy(candidate) else None
|
normal
|
{
"blob_id": "70c20b38edb01552a8c7531b3e87a9302ffaf6c5",
"index": 5062,
"step-1": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n <mask token>\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n <mask token>\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-3": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-4": "<mask token>\nfrom Products.Five.browser import BrowserView\nfrom plone import api\nfrom plone.app.contenttypes.interfaces import IImage\n\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Module providing views for asset storage folder\"\"\"\nfrom Products.Five.browser import BrowserView\nfrom plone import api\nfrom plone.app.contenttypes.interfaces import IImage\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import unittest
from textwrap import dedent
from simplesat import InstallRequirement, Repository
from simplesat.test_utils import packages_from_definition
from ..compute_dependencies import (compute_dependencies,
compute_leaf_packages,
compute_reverse_dependencies)
PACKAGE_DEF_0 = dedent("""\
A 0.0.0-1; depends (B ^= 0.0.0)
B 0.0.0-1; depends (D == 0.0.0-2)
B 0.0.0-2; depends (D ^= 0.0.0)
C 0.0.0-1; depends (E >= 1.0.0)
""")
PACKAGE_DEF_1 = dedent("""\
D 0.0.0-2
E 0.0.0-1
E 1.0.0-1
E 1.0.1-1
""")
PACKAGE_DEF_2 = dedent("""\
B 0.0.0-1; depends (D == 0.0.0-2)
C 0.0.0-1; depends (E >= 1.0.0)
""")
class TestComputeDependencies(unittest.TestCase):
    """Forward-dependency computation over a pair of repositories."""

    def setUp(self):
        definitions = (PACKAGE_DEF_0, PACKAGE_DEF_1)
        self.repos = [Repository(packages_from_definition(d))
                      for d in definitions]

    def test_no_dependency(self):
        # D declares no dependencies of its own.
        req = InstallRequirement._from_string('D == 0.0.0-2')

        result = compute_dependencies(self.repos, req)

        self.assertEqual(result, set())

    def test_simple_dependency(self):
        # C depends on E >= 1.0.0, satisfied by two E releases.
        req = InstallRequirement._from_string('C *')
        expected = packages_from_definition(
            """E 1.0.0-1
            E 1.0.1-1""")

        result = compute_dependencies(self.repos, req)

        self.assertEqual(result, set(expected))

    def test_chained_requirements(self):
        # Direct dependencies only: A pulls in both B releases.
        req = InstallRequirement._from_string('A ^= 0.0.0')
        expected = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
            B 0.0.0-2; depends (D ^= 0.0.0) """
        )

        result = compute_dependencies(self.repos, req)

        self.assertEqual(result, set(expected))

    def test_chained_requirements_transitive(self):
        # The transitive closure additionally includes D, B's dependency.
        req = InstallRequirement._from_string('A ^= 0.0.0')
        expected = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
            B 0.0.0-2; depends (D ^= 0.0.0)
            D 0.0.0-2 """
        )

        result = compute_dependencies(self.repos, req, transitive=True)

        self.assertEqual(result, set(expected))
class TestComputeReverseDependencies(unittest.TestCase):
    """Reverse-dependency computation over a pair of repositories."""

    def setUp(self):
        definitions = (PACKAGE_DEF_0, PACKAGE_DEF_1)
        self.repos = [Repository(packages_from_definition(d))
                      for d in definitions]

    def test_no_dependency(self):
        # Nothing in the repositories depends on A.
        req = InstallRequirement._from_string('A ^= 0.0.0')

        result = compute_reverse_dependencies(self.repos, req)

        self.assertEqual(result, set())

    def test_simple_dependency(self):
        # Only C declares a dependency on E.
        req = InstallRequirement._from_string('E *')
        expected = packages_from_definition(
            'C 0.0.0-1; depends (E >= 1.0.0)'
        )

        result = compute_reverse_dependencies(self.repos, req)

        self.assertEqual(result, set(expected))

    def test_chained_dependencies(self):
        # Direct reverse dependencies of D: both B releases.
        req = InstallRequirement._from_string('D ^= 0.0.0')
        expected = packages_from_definition(
            """B 0.0.0-1; depends (D == 0.0.0-2)
            B 0.0.0-2; depends (D ^= 0.0.0)"""
        )

        result = compute_reverse_dependencies(self.repos, req)

        self.assertEqual(result, set(expected))

    def test_chained_dependencies_transitive(self):
        # Transitively, A also depends on D (through B).
        req = InstallRequirement._from_string('D ^= 0.0.0')
        expected = packages_from_definition(
            """A 0.0.0-1; depends (B ^= 0.0.0)
            B 0.0.0-1; depends (D == 0.0.0-2)
            B 0.0.0-2; depends (D ^= 0.0.0)"""
        )

        result = compute_reverse_dependencies(self.repos, req,
                                              transitive=True)

        self.assertEqual(result, set(expected))
class TestComputeLeafPackages(unittest.TestCase):
    """Leaf-package computation across three (partly overlapping) repos."""

    def setUp(self):
        definitions = (PACKAGE_DEF_0, PACKAGE_DEF_1, PACKAGE_DEF_2)
        self.repos = [Repository(packages_from_definition(d))
                      for d in definitions]

    def test_simple(self):
        # Leaves are the packages that nothing else depends on.
        expected = packages_from_definition(
            """A 0.0.0-1; depends (B ^= 0.0.0)
            C 0.0.0-1; depends (E >= 1.0.0)
            E 0.0.0-1 """
        )

        result = compute_leaf_packages(self.repos)

        self.assertEqual(result, set(expected))
|
normal
|
{
"blob_id": "fcf19c49bb161305eaa5ba8bc26e276a8e8db8ea",
"index": 3925,
"step-1": "<mask token>\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-2": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 
0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-3": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n <mask token>\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = 
compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-4": "<mask token>\n\n\nclass TestComputeDependencies(unittest.TestCase):\n <mask token>\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n )\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)')\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, 
set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-5": "import unittest\nfrom textwrap import dedent\n\nfrom simplesat import InstallRequirement, Repository\nfrom simplesat.test_utils import packages_from_definition\n\nfrom ..compute_dependencies import (compute_dependencies,\n compute_leaf_packages,\n compute_reverse_dependencies)\n\n\nPACKAGE_DEF_0 = dedent(\"\"\"\\\n A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nPACKAGE_DEF_1 = dedent(\"\"\"\\\n D 0.0.0-2\n E 0.0.0-1\n E 1.0.0-1\n E 1.0.1-1\n\"\"\")\n\nPACKAGE_DEF_2 = dedent(\"\"\"\\\n B 0.0.0-1; depends (D == 0.0.0-2)\n C 0.0.0-1; depends (E >= 1.0.0)\n\"\"\")\n\n\nclass TestComputeDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('D == 0.0.0-2')\n expected_deps = set()\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, expected_deps)\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('C *')\n expected_deps = packages_from_definition(\n \"\"\"E 1.0.0-1\n E 1.0.1-1\"\"\")\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0) \"\"\"\n )\n\n deps = compute_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_requirements_transitive(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\n D 0.0.0-2 \"\"\"\n 
)\n\n deps = compute_dependencies(self.repos, requirement, transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeReverseDependencies(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n self.repos = [repo_0, repo_1]\n\n def test_no_dependency(self):\n requirement = InstallRequirement._from_string('A ^= 0.0.0')\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set())\n\n def test_simple_dependency(self):\n requirement = InstallRequirement._from_string('E *')\n expected_deps = packages_from_definition(\n 'C 0.0.0-1; depends (E >= 1.0.0)'\n )\n\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement)\n self.assertEqual(deps, set(expected_deps))\n\n def test_chained_dependencies_transitive(self):\n requirement = InstallRequirement._from_string('D ^= 0.0.0')\n expected_deps = packages_from_definition(\n \"\"\"A 0.0.0-1; depends (B ^= 0.0.0)\n B 0.0.0-1; depends (D == 0.0.0-2)\n B 0.0.0-2; depends (D ^= 0.0.0)\"\"\"\n )\n deps = compute_reverse_dependencies(self.repos, requirement,\n transitive=True)\n self.assertEqual(deps, set(expected_deps))\n\n\nclass TestComputeLeafPackages(unittest.TestCase):\n\n def setUp(self):\n repo_0 = Repository(packages_from_definition(PACKAGE_DEF_0))\n repo_1 = Repository(packages_from_definition(PACKAGE_DEF_1))\n repo_2 = Repository(packages_from_definition(PACKAGE_DEF_2))\n self.repos = [repo_0, repo_1, repo_2]\n\n def test_simple(self):\n expected_leaf_packages = packages_from_definition(\n \"\"\"A 0.0.0-1; depends 
(B ^= 0.0.0)\n C 0.0.0-1; depends (E >= 1.0.0)\n E 0.0.0-1 \"\"\"\n )\n leaf_packages = compute_leaf_packages(self.repos)\n\n self.assertEqual(leaf_packages, set(expected_leaf_packages))\n",
"step-ids": [
5,
12,
13,
14,
18
]
}
|
[
5,
12,
13,
14,
18
] |
<|reserved_special_token_0|>
def follow(request):
action = request.POST.get('action')
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
if followed_user == request.user:
return JsonResponse({})
if request.user.is_authenticated():
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=followed_user.
id, object_type='user', target_id=followed_user.id,
target_type='user')
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=
followed_user.id, object_type='user', target_id=
followed_user.id, target_type='user').delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
<|reserved_special_token_0|>
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
try:
output_list = paginator.page(page)
except PageNotAnInteger:
output_list = paginator.page(2)
except EmptyPage:
output_list = []
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string('social/partials/posts.html', {
'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
<|reserved_special_token_0|>
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string('social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.
MEDIA_URL})
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
<|reserved_special_token_0|>
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string('social/partials/users.html', {
'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html', {'results': results})
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def follow(request):
action = request.POST.get('action')
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
if followed_user == request.user:
return JsonResponse({})
if request.user.is_authenticated():
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=followed_user.
id, object_type='user', target_id=followed_user.id,
target_type='user')
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=
followed_user.id, object_type='user', target_id=
followed_user.id, target_type='user').delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def delete(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
item = Post.objects.get(id=item_id)
messages.success(request, 'Post deleted successfully!')
try:
Notification.objects.filter(object_id=item.id, object_type='post'
).delete()
except Notification.DoesNotExist:
pass
elif item_type == 'comment':
item = Comment.objects.get(id=item_id)
messages.success(request, 'Comment deleted successfully!')
try:
Notification.objects.get(object_id=item.id, object_type='comment'
).delete()
except Notification.DoesNotExist:
pass
if item.author == request.user:
item.delete()
data['error'] = False
return JsonResponse(data)
<|reserved_special_token_0|>
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
try:
output_list = paginator.page(page)
except PageNotAnInteger:
output_list = paginator.page(2)
except EmptyPage:
output_list = []
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string('social/partials/posts.html', {
'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
def load_user_lists(request):
user_list = request.POST.get('userList')
user_id = request.POST.get('userId')
page = request.POST.get('page')
user = User.objects.get(id=user_id)
if user_list == 'posts':
posts = user.profile.get_posts(request.user)
posts = paginate_list(posts, page)
posts_html = loader.render_to_string('social/partials/posts.html',
{'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
elif user_list == 'following':
following = list(reversed(user.profile.following.all()))
following = paginate_list(following, page)
following_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': following, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = following.has_next()
data['list_html'] = following_html
elif user_list == 'followers':
followers = list(reversed(user.profile.followers.all()))
followers = paginate_list(followers, page)
followers_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': followers, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = followers.has_next()
data['list_html'] = followers_html
elif user_list == 'liked':
liked_posts = c.liked(request.user)
liked_posts = paginate_list(liked_posts, page)
liked_html = loader.render_to_string('social/partials/posts.html',
{'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = liked_posts.has_next()
data['list_html'] = liked_html
return JsonResponse(data)
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string('social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.
MEDIA_URL})
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
<|reserved_special_token_0|>
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string('social/partials/users.html', {
'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html', {'results': results})
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
def load_notifications(request):
page = request.POST.get('page')
notifs = Notification.objects.filter(target_type='user', target_id=
request.user.id).order_by('-created_at')
notifs = paginate_list(notifs, page)
notifications = []
for n in notifs:
notif = Notify(n)
notification = notif.get()
notifications.append({'message': notification, 'date': n.created_at})
if n.is_read == False:
n.is_read = True
n.save()
notifs_html = loader.render_to_string('social/partials/notifications.html',
{'notifications': notifications})
data['has_next'] = notifs.has_next()
data['notifs_html'] = notifs_html
return JsonResponse(data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def like(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
liked_object = Post.objects.get(id=item_id)
elif item_type == 'comment':
liked_object = Comment.objects.get(id=item_id)
target = liked_object.author if item_type != 'user' else liked_object
if request.user.is_authenticated:
like = Like.objects.filter(item_id=item_id, item_type=item_type,
user=request.user)
if like.exists():
like.delete()
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='like', object_id=liked_object.
id, object_type=item_type, target_id=target.id,
target_type='user').delete()
except Notification.DoesNotExist:
pass
else:
like = Like.objects.create(item_id=item_id, item_type=item_type,
user=request.user)
if like.user != target:
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='like', object_id=liked_object.
id, object_type=item_type, target_id=target.id,
target_type='user')
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def follow(request):
action = request.POST.get('action')
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
if followed_user == request.user:
return JsonResponse({})
if request.user.is_authenticated():
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=followed_user.
id, object_type='user', target_id=followed_user.id,
target_type='user')
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=
followed_user.id, object_type='user', target_id=
followed_user.id, target_type='user').delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def delete(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
item = Post.objects.get(id=item_id)
messages.success(request, 'Post deleted successfully!')
try:
Notification.objects.filter(object_id=item.id, object_type='post'
).delete()
except Notification.DoesNotExist:
pass
elif item_type == 'comment':
item = Comment.objects.get(id=item_id)
messages.success(request, 'Comment deleted successfully!')
try:
Notification.objects.get(object_id=item.id, object_type='comment'
).delete()
except Notification.DoesNotExist:
pass
if item.author == request.user:
item.delete()
data['error'] = False
return JsonResponse(data)
def comment(request):
if request.user.is_authenticated():
data['auth'] = True
form = CommentForm(request.POST)
if form.is_valid():
post_id = request.POST.get('post_id')
content = request.POST.get('content')
page = request.POST.get('page')
post = Post.objects.get(id=post_id)
comment = Comment.objects.create(content=content, post=post,
author=request.user)
show_comment_actions = True if page == 'post' else False
comment_html = loader.render_to_string(
'social/partials/latest-comment.html', {'comment': comment,
'current_user': request.user, 'show_comment_actions':
show_comment_actions})
data['comment_html'] = comment_html
data['errors'] = False
if post.author != comment.author:
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='comment', object_id=comment.id,
object_type='comment', target_id=post.author.id,
target_type='user')
else:
data['errors'] = form.errors
else:
data['auth'] = False
return JsonResponse(data)
<|reserved_special_token_0|>
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
try:
output_list = paginator.page(page)
except PageNotAnInteger:
output_list = paginator.page(2)
except EmptyPage:
output_list = []
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string('social/partials/posts.html', {
'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
def load_user_lists(request):
user_list = request.POST.get('userList')
user_id = request.POST.get('userId')
page = request.POST.get('page')
user = User.objects.get(id=user_id)
if user_list == 'posts':
posts = user.profile.get_posts(request.user)
posts = paginate_list(posts, page)
posts_html = loader.render_to_string('social/partials/posts.html',
{'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
elif user_list == 'following':
following = list(reversed(user.profile.following.all()))
following = paginate_list(following, page)
following_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': following, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = following.has_next()
data['list_html'] = following_html
elif user_list == 'followers':
followers = list(reversed(user.profile.followers.all()))
followers = paginate_list(followers, page)
followers_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': followers, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = followers.has_next()
data['list_html'] = followers_html
elif user_list == 'liked':
liked_posts = c.liked(request.user)
liked_posts = paginate_list(liked_posts, page)
liked_html = loader.render_to_string('social/partials/posts.html',
{'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = liked_posts.has_next()
data['list_html'] = liked_html
return JsonResponse(data)
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string('social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.
MEDIA_URL})
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
<|reserved_special_token_0|>
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string('social/partials/users.html', {
'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html', {'results': results})
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
def load_notifications(request):
page = request.POST.get('page')
notifs = Notification.objects.filter(target_type='user', target_id=
request.user.id).order_by('-created_at')
notifs = paginate_list(notifs, page)
notifications = []
for n in notifs:
notif = Notify(n)
notification = notif.get()
notifications.append({'message': notification, 'date': n.created_at})
if n.is_read == False:
n.is_read = True
n.save()
notifs_html = loader.render_to_string('social/partials/notifications.html',
{'notifications': notifications})
data['has_next'] = notifs.has_next()
data['notifs_html'] = notifs_html
return JsonResponse(data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def like(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
liked_object = Post.objects.get(id=item_id)
elif item_type == 'comment':
liked_object = Comment.objects.get(id=item_id)
target = liked_object.author if item_type != 'user' else liked_object
if request.user.is_authenticated:
like = Like.objects.filter(item_id=item_id, item_type=item_type,
user=request.user)
if like.exists():
like.delete()
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='like', object_id=liked_object.
id, object_type=item_type, target_id=target.id,
target_type='user').delete()
except Notification.DoesNotExist:
pass
else:
like = Like.objects.create(item_id=item_id, item_type=item_type,
user=request.user)
if like.user != target:
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='like', object_id=liked_object.
id, object_type=item_type, target_id=target.id,
target_type='user')
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def follow(request):
action = request.POST.get('action')
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
if followed_user == request.user:
return JsonResponse({})
if request.user.is_authenticated():
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=followed_user.
id, object_type='user', target_id=followed_user.id,
target_type='user')
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(actor_id=request.user.id,
actor_type='user', verb='follow', object_id=
followed_user.id, object_type='user', target_id=
followed_user.id, target_type='user').delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def delete(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
item = Post.objects.get(id=item_id)
messages.success(request, 'Post deleted successfully!')
try:
Notification.objects.filter(object_id=item.id, object_type='post'
).delete()
except Notification.DoesNotExist:
pass
elif item_type == 'comment':
item = Comment.objects.get(id=item_id)
messages.success(request, 'Comment deleted successfully!')
try:
Notification.objects.get(object_id=item.id, object_type='comment'
).delete()
except Notification.DoesNotExist:
pass
if item.author == request.user:
item.delete()
data['error'] = False
return JsonResponse(data)
def comment(request):
if request.user.is_authenticated():
data['auth'] = True
form = CommentForm(request.POST)
if form.is_valid():
post_id = request.POST.get('post_id')
content = request.POST.get('content')
page = request.POST.get('page')
post = Post.objects.get(id=post_id)
comment = Comment.objects.create(content=content, post=post,
author=request.user)
show_comment_actions = True if page == 'post' else False
comment_html = loader.render_to_string(
'social/partials/latest-comment.html', {'comment': comment,
'current_user': request.user, 'show_comment_actions':
show_comment_actions})
data['comment_html'] = comment_html
data['errors'] = False
if post.author != comment.author:
Notification.objects.create(actor_id=request.user.id,
actor_type='user', verb='comment', object_id=comment.id,
object_type='comment', target_id=post.author.id,
target_type='user')
else:
data['errors'] = form.errors
else:
data['auth'] = False
return JsonResponse(data)
def clear_image(request):
item_id = int(request.POST.get('itemId'))
item_type = request.POST.get('itemType')
if item_type == 'post':
Post.objects.get(id=item_id, author=request.user
).featured_image.delete(save=True)
elif item_type == 'user' and item_id == request.user.id:
User.objects.get(id=item_id).profile.profile_photo.delete(save=True)
messages.success(request, 'Image successfully removed!')
return JsonResponse(data)
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
try:
output_list = paginator.page(page)
except PageNotAnInteger:
output_list = paginator.page(2)
except EmptyPage:
output_list = []
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string('social/partials/posts.html', {
'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
def load_user_lists(request):
user_list = request.POST.get('userList')
user_id = request.POST.get('userId')
page = request.POST.get('page')
user = User.objects.get(id=user_id)
if user_list == 'posts':
posts = user.profile.get_posts(request.user)
posts = paginate_list(posts, page)
posts_html = loader.render_to_string('social/partials/posts.html',
{'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
elif user_list == 'following':
following = list(reversed(user.profile.following.all()))
following = paginate_list(following, page)
following_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': following, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = following.has_next()
data['list_html'] = following_html
elif user_list == 'followers':
followers = list(reversed(user.profile.followers.all()))
followers = paginate_list(followers, page)
followers_html = loader.render_to_string('social/partials/users.html',
{'user': request.user, 'users': followers, 'MEDIA_URL':
settings.MEDIA_URL})
data['has_next'] = followers.has_next()
data['list_html'] = followers_html
elif user_list == 'liked':
liked_posts = c.liked(request.user)
liked_posts = paginate_list(liked_posts, page)
liked_html = loader.render_to_string('social/partials/posts.html',
{'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = liked_posts.has_next()
data['list_html'] = liked_html
return JsonResponse(data)
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string('social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.
MEDIA_URL})
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
<|reserved_special_token_0|>
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string('social/partials/users.html', {
'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html', {'results': results})
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
def load_notifications(request):
page = request.POST.get('page')
notifs = Notification.objects.filter(target_type='user', target_id=
request.user.id).order_by('-created_at')
notifs = paginate_list(notifs, page)
notifications = []
for n in notifs:
notif = Notify(n)
notification = notif.get()
notifications.append({'message': notification, 'date': n.created_at})
if n.is_read == False:
n.is_read = True
n.save()
notifs_html = loader.render_to_string('social/partials/notifications.html',
{'notifications': notifications})
data['has_next'] = notifs.has_next()
data['notifs_html'] = notifs_html
return JsonResponse(data)
<|reserved_special_token_1|>
# views which respond to ajax requests
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.models import User
from social.models import Like, Post, Comment, Notification
from social.notifications import Notify
from social.forms import CommentForm
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template import loader
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from social.collections import Collections
from watson import search as watson
c = Collections()
data = {}
# like or unlike posts, kraks, users or comments
def like(request):
    """Toggle a Like on a post, comment or user for the current user.

    POST params: ``itemId`` and ``itemType`` ('post', 'comment' or 'user').
    Creates or removes the matching Notification and returns JSON with
    ``auth`` indicating whether the requester was authenticated.
    """
    # Request-local payload: the old module-level ``data`` dict was shared
    # between requests/threads and leaked keys across views.
    data = {}
    item_id = request.POST.get('itemId')
    item_type = request.POST.get('itemType')
    # Resolve the liked object; its author is the notification target
    # (or the user themselves when a user is liked).
    if item_type == "post":
        liked_object = Post.objects.get(id=item_id)
    elif item_type == "comment":
        liked_object = Comment.objects.get(id=item_id)
    elif item_type == "user":
        # BUG FIX: the 'user' case previously left liked_object unbound and
        # crashed with NameError on the next line.
        liked_object = User.objects.get(id=item_id)
    target = liked_object.author if item_type != "user" else liked_object
    # user must be authenticated to like/unlike
    if request.user.is_authenticated:
        like = Like.objects.filter(item_id=item_id, item_type=item_type, user=request.user)
        if like.exists():
            # unlike: drop the Like and its notification (if any).
            like.delete()
            # filter().delete() is a no-op when nothing matches and cannot
            # raise MultipleObjectsReturned, unlike the old get().delete().
            Notification.objects.filter(
                actor_id=request.user.id,
                actor_type="user",
                verb="like",
                object_id=liked_object.id,
                object_type=item_type,
                target_id=target.id,
                target_type="user"
            ).delete()
        else:
            # like
            like = Like.objects.create(item_id=item_id, item_type=item_type, user=request.user)
            # NB: users should not be notified of their actions on objects they created
            if like.user != target:
                Notification.objects.create(
                    actor_id=request.user.id,
                    actor_type="user",
                    verb="like",
                    object_id=liked_object.id,
                    object_type=item_type,
                    target_id=target.id,
                    target_type="user"
                )
        data['auth'] = True
    else:  # anonymous user
        data['auth'] = False
    return JsonResponse(data)
# follow or unfollow users
def follow(request):
    """Follow or unfollow another user.

    POST params: ``action`` ('follow'/'unfollow') and ``followedUserId``.
    Returns JSON with ``auth``; an empty payload when a user targets
    themselves.
    """
    data = {}  # request-local payload (module-level dict is shared mutable state)
    action = request.POST.get('action')  # follow/unfollow
    followed_user_id = request.POST.get('followedUserId')
    followed_user = User.objects.get(id=followed_user_id)
    # users cannot follow themselves
    if followed_user == request.user:
        return JsonResponse({})
    # CONSISTENCY FIX: is_authenticated is used as a property elsewhere in
    # this module (see like()); the old call form breaks on modern Django.
    if request.user.is_authenticated:
        if action == 'follow':
            followed_user.profile.followers.add(request.user)
            request.user.profile.following.add(followed_user)
            # create notification
            Notification.objects.create(
                actor_id=request.user.id,
                actor_type="user",
                verb="follow",
                object_id=followed_user.id,
                object_type="user",
                target_id=followed_user.id,
                target_type="user"
            )
        elif action == 'unfollow':
            followed_user.profile.followers.remove(request.user)
            request.user.profile.following.remove(followed_user)
            # filter().delete() is a no-op when nothing matches, so the old
            # try/except DoesNotExist is unnecessary (and this also avoids
            # MultipleObjectsReturned from .get()).
            Notification.objects.filter(
                actor_id=request.user.id,
                actor_type="user",
                verb="follow",
                object_id=followed_user.id,
                object_type="user",
                target_id=followed_user.id,
                target_type="user"
            ).delete()
        data['auth'] = True
    else:
        data['auth'] = False
    return JsonResponse(data)
def delete(request):
    """Delete a post or comment owned by the requesting user.

    POST params: ``itemId`` and ``itemType`` ('post' or 'comment').  The
    item's notifications are removed together with the item itself.
    Returns JSON with ``error`` — True when the requester is not the author.
    """
    data = {}  # request-local payload (module-level dict is shared mutable state)
    item_id = request.POST.get('itemId')
    item_type = request.POST.get('itemType')
    if item_type == 'post':
        item = Post.objects.get(id=item_id)
    elif item_type == 'comment':
        item = Comment.objects.get(id=item_id)
    # BUG FIX: the ownership check now happens *before* any side effects.
    # Previously notifications were deleted and a success message flashed
    # even when the requester was not the item's author.
    if item.author == request.user:
        # filter().delete() also avoids the MultipleObjectsReturned that the
        # old comment branch's .get() could raise.
        Notification.objects.filter(object_id=item.id, object_type=item_type).delete()
        item.delete()
        if item_type == 'post':
            messages.success(request, "Post deleted successfully!")
        else:
            messages.success(request, "Comment deleted successfully!")
        data['error'] = False
    else:
        data['error'] = True
    return JsonResponse(data)
def comment(request):
    """Create a comment on a post and return its rendered HTML.

    Returns JSON with ``auth``, ``errors`` (False, or the form's errors) and,
    on success, ``comment_html``.  Notifies the post author unless they
    commented on their own post.
    """
    data = {}  # request-local payload (module-level dict is shared mutable state)
    # CONSISTENCY FIX: use the is_authenticated property, as like() does;
    # the old call form breaks on modern Django.
    if request.user.is_authenticated:
        data['auth'] = True
        form = CommentForm(request.POST)
        if form.is_valid():
            post_id = request.POST.get('post_id')
            content = request.POST.get('content')
            page = request.POST.get('page')
            post = Post.objects.get(id=post_id)
            comment = Comment.objects.create(content=content, post=post, author=request.user)
            # edit/delete controls are only rendered on the post detail page
            show_comment_actions = page == "post"
            comment_html = loader.render_to_string(
                'social/partials/latest-comment.html', {
                    'comment': comment,
                    'current_user': request.user,
                    'show_comment_actions': show_comment_actions
                },
            )
            data['comment_html'] = comment_html
            data['errors'] = False
            # notify the post author (never about their own comment)
            if post.author != comment.author:
                Notification.objects.create(
                    actor_id=request.user.id,
                    actor_type="user",
                    verb="comment",
                    object_id=comment.id,
                    object_type="comment",
                    target_id=post.author.id,
                    target_type="user"
                )
        else:
            data['errors'] = form.errors
    else:
        data['auth'] = False
    return JsonResponse(data)
def clear_image(request):
    """Remove a post's featured image, or the current user's profile photo.

    POST params: ``itemId`` and ``itemType`` ('post' or 'user').  Only the
    post's author / the user themselves can clear the image.
    """
    target_id = int(request.POST.get('itemId'))
    target_type = request.POST.get('itemType')
    if target_type == 'post':
        # Querying by author=request.user enforces ownership.
        post = Post.objects.get(id=target_id, author=request.user)
        post.featured_image.delete(save=True)
    elif target_type == 'user' and target_id == request.user.id:
        profile = User.objects.get(id=target_id).profile
        profile.profile_photo.delete(save=True)
    messages.success(request, 'Image successfully removed!')
    return JsonResponse(data)
#### LAZY LOADING ####
######################

# META
def paginate_list(input_list, page, results_per_page=10):
    """Return page ``page`` of ``input_list`` as a Django Page object.

    Non-integer pages fall back to page 2 (page 1 is rendered server-side;
    AJAX lazy loading starts at 2).  Out-of-range pages yield an *empty*
    Page object.
    """
    paginator = Paginator(input_list, results_per_page)
    try:
        output_list = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver 2nd page.
        output_list = paginator.page(2)
    except EmptyPage:
        # BUG FIX: returning a plain [] crashed every caller on
        # output_list.has_next(); return an empty Page instead so the
        # interface ("page-like object") is kept.
        output_list = Paginator([], results_per_page).page(1)
    return output_list
def load_feeds(request):
    """Lazy-load one page (15 items) of the user's feed.

    Returns JSON with ``list_html`` (rendered posts) and ``has_next``.
    """
    # BUG FIX: build a request-local payload; the shared module-level ``data``
    # dict leaked keys written by other views into this response.
    data = {}
    page = request.POST.get('page')
    posts = paginate_list(c.feed(request.user), page, 15)
    posts_html = loader.render_to_string(
        'social/partials/posts.html',
        {'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
    )
    data['has_next'] = posts.has_next()
    data['list_html'] = posts_html
    return JsonResponse(data)
def load_user_lists(request):
    """Lazy-load one page of a profile tab: posts, following, followers or liked.

    POST params: ``userList`` (tab name), ``userId`` and ``page``.  Returns
    JSON with ``list_html`` and ``has_next``.
    """
    # BUG FIX: request-local payload; the shared module-level ``data`` dict
    # leaked keys between views/requests.
    data = {}
    user_list = request.POST.get('userList')  # posts, following, followers, liked posts
    user_id = request.POST.get('userId')
    page = request.POST.get('page')
    user = User.objects.get(id=user_id)
    if user_list == 'posts':
        posts = paginate_list(user.profile.get_posts(request.user), page)
        data['has_next'] = posts.has_next()
        data['list_html'] = loader.render_to_string(
            'social/partials/posts.html',
            {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL},
        )
    elif user_list == 'following':
        # newest-first
        following = paginate_list(list(reversed(user.profile.following.all())), page)
        data['has_next'] = following.has_next()
        data['list_html'] = loader.render_to_string(
            'social/partials/users.html',
            {'user': request.user, 'users': following, 'MEDIA_URL': settings.MEDIA_URL},
        )
    elif user_list == 'followers':
        followers = paginate_list(list(reversed(user.profile.followers.all())), page)
        data['has_next'] = followers.has_next()
        data['list_html'] = loader.render_to_string(
            'social/partials/users.html',
            {'user': request.user, 'users': followers, 'MEDIA_URL': settings.MEDIA_URL},
        )
    elif user_list == 'liked':
        liked_posts = paginate_list(c.liked(request.user), page)
        data['has_next'] = liked_posts.has_next()
        data['list_html'] = loader.render_to_string(
            'social/partials/posts.html',
            {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL},
        )
    return JsonResponse(data)
def load_comments(request):
    """Return one page of a post's comments, newest first, as rendered HTML.

    POST params:
        postId: id of the post whose comments to load.
        page:   page number to load.

    Returns JSON with 'comments_html' and 'has_next'.
    """
    # Local response dict instead of the shared module-level `data` dict,
    # which races between requests and leaks keys from other views.
    data = {}
    post_id = request.POST.get('postId')
    page = request.POST.get('page')
    comments = paginate_list(
        Comment.objects.filter(post__id=post_id).order_by('-created_at'),
        page,
    )
    data['comments_html'] = loader.render_to_string(
        'social/partials/comments.html',
        {'comments': comments, 'user': request.user,
         'MEDIA_URL': settings.MEDIA_URL},
    )
    # paginate_list returns a plain [] on EmptyPage, which has no has_next().
    data['has_next'] = comments.has_next() if hasattr(comments, 'has_next') else False
    return JsonResponse(data)
def load_popular(request):
    """Return one page of popular posts as rendered HTML.

    POST params:
        page: page number to load.

    Returns JSON with 'list_html' and 'has_next'.
    """
    # Local response dict instead of the shared module-level `data` dict,
    # which races between requests and leaks keys from other views.
    data = {}
    page = request.POST.get('page')
    popular_posts = paginate_list(c.popular(request.user), page, 15)
    data['list_html'] = loader.render_to_string(
        'social/partials/posts.html',
        {'posts': popular_posts, 'user': request.user,
         'MEDIA_URL': settings.MEDIA_URL},
    )
    # paginate_list returns a plain [] on EmptyPage, which has no has_next().
    data['has_next'] = (popular_posts.has_next()
                        if hasattr(popular_posts, 'has_next') else False)
    return JsonResponse(data)
def load_users(request):
    """Return one page of popular users as rendered HTML.

    POST params:
        page: page number to load.

    Returns JSON with 'list_html' and 'has_next'.
    """
    # Local response dict instead of the shared module-level `data` dict,
    # which races between requests and leaks keys from other views.
    data = {}
    page = request.POST.get('page')
    users = paginate_list(c.popular_users(request.user), page, 15)
    data['list_html'] = loader.render_to_string(
        'social/partials/users.html',
        {'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL},
    )
    # paginate_list returns a plain [] on EmptyPage, which has no has_next().
    data['has_next'] = users.has_next() if hasattr(users, 'has_next') else False
    return JsonResponse(data)
def load_search_results(request):
    """Return one page of full-text search results as rendered HTML.

    POST params:
        q:    the search query string.
        page: page number to load.

    Returns JSON with 'results_html' and 'has_next'.
    """
    # Local response dict instead of the shared module-level `data` dict,
    # which races between requests and leaks keys from other views.
    data = {}
    q = request.POST.get('q')
    page = request.POST.get('page')
    results = paginate_list(watson.search(q), page)
    data['results_html'] = loader.render_to_string(
        'social/partials/search-results.html',
        {'results': results},
    )
    # paginate_list returns a plain [] on EmptyPage, which has no has_next().
    data['has_next'] = results.has_next() if hasattr(results, 'has_next') else False
    return JsonResponse(data)
def load_notifications(request):
    """Return one page of the user's notifications, marking them as read.

    POST params:
        page: page number to load.

    Returns JSON with 'notifs_html' and 'has_next'.
    """
    # Local response dict instead of the shared module-level `data` dict,
    # which races between requests and leaks keys from other views.
    data = {}
    page = request.POST.get('page')
    notifs = paginate_list(
        Notification.objects.filter(
            target_type="user", target_id=request.user.id
        ).order_by('-created_at'),
        page,
    )
    notifications = []
    for n in notifs:
        notifications.append({'message': Notify(n).get(), 'date': n.created_at})
        # The user has now seen this page: mark unread entries as read.
        if not n.is_read:
            n.is_read = True
            n.save()
    data['notifs_html'] = loader.render_to_string(
        'social/partials/notifications.html',
        {'notifications': notifications},
    )
    # paginate_list returns a plain [] on EmptyPage, which has no has_next().
    data['has_next'] = notifs.has_next() if hasattr(notifs, 'has_next') else False
    return JsonResponse(data)
|
flexible
|
{
"blob_id": "0b4f070d30642449536118accffa371a89dd3075",
"index": 8857,
"step-1": "<mask token>\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = 
paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n 
except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = 
liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n 
return JsonResponse(data)\n",
"step-3": "<mask token>\n\n\ndef like(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n liked_object = Post.objects.get(id=item_id)\n elif item_type == 'comment':\n liked_object = Comment.objects.get(id=item_id)\n target = liked_object.author if item_type != 'user' else liked_object\n if request.user.is_authenticated:\n like = Like.objects.filter(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.exists():\n like.delete()\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n else:\n like = Like.objects.create(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.user != target:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user')\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n 
followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\ndef comment(request):\n if request.user.is_authenticated():\n data['auth'] = True\n form = CommentForm(request.POST)\n if form.is_valid():\n post_id = request.POST.get('post_id')\n content = request.POST.get('content')\n page = request.POST.get('page')\n post = Post.objects.get(id=post_id)\n comment = Comment.objects.create(content=content, post=post,\n author=request.user)\n show_comment_actions = True if page == 'post' else False\n comment_html = loader.render_to_string(\n 'social/partials/latest-comment.html', {'comment': comment,\n 'current_user': request.user, 'show_comment_actions':\n show_comment_actions})\n data['comment_html'] = comment_html\n data['errors'] = False\n if post.author != comment.author:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='comment', object_id=comment.id,\n object_type='comment', target_id=post.author.id,\n target_type='user')\n else:\n data['errors'] = form.errors\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef 
paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, 
page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = 
loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n return JsonResponse(data)\n",
"step-4": "<mask token>\n\n\ndef like(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n liked_object = Post.objects.get(id=item_id)\n elif item_type == 'comment':\n liked_object = Comment.objects.get(id=item_id)\n target = liked_object.author if item_type != 'user' else liked_object\n if request.user.is_authenticated:\n like = Like.objects.filter(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.exists():\n like.delete()\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n else:\n like = Like.objects.create(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.user != target:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user')\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n 
followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\ndef comment(request):\n if request.user.is_authenticated():\n data['auth'] = True\n form = CommentForm(request.POST)\n if form.is_valid():\n post_id = request.POST.get('post_id')\n content = request.POST.get('content')\n page = request.POST.get('page')\n post = Post.objects.get(id=post_id)\n comment = Comment.objects.create(content=content, post=post,\n author=request.user)\n show_comment_actions = True if page == 'post' else False\n comment_html = loader.render_to_string(\n 'social/partials/latest-comment.html', {'comment': comment,\n 'current_user': request.user, 'show_comment_actions':\n show_comment_actions})\n data['comment_html'] = comment_html\n data['errors'] = False\n if post.author != comment.author:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='comment', object_id=comment.id,\n object_type='comment', target_id=post.author.id,\n target_type='user')\n else:\n data['errors'] = form.errors\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef clear_image(request):\n 
item_id = int(request.POST.get('itemId'))\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n Post.objects.get(id=item_id, author=request.user\n ).featured_image.delete(save=True)\n elif item_type == 'user' and item_id == request.user.id:\n User.objects.get(id=item_id).profile.profile_photo.delete(save=True)\n messages.success(request, 'Image successfully removed!')\n return JsonResponse(data)\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = 
list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n 
page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n return JsonResponse(data)\n",
"step-5": "# views which respond to ajax requests\r\n\r\nfrom django.contrib import messages\r\nfrom django.conf import settings\r\nfrom django.contrib.auth.models import User\r\nfrom social.models import Like, Post, Comment, Notification\r\nfrom social.notifications import Notify\r\nfrom social.forms import CommentForm\r\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\r\nfrom django.template import loader\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom social.collections import Collections\r\nfrom watson import search as watson\r\n\r\nc = Collections()\r\ndata = {}\r\n\r\n# like or unlike posts, kraks, users or comments\r\ndef like(request):\r\n item_id = request.POST.get('itemId')\r\n item_type = request.POST.get('itemType')\r\n\r\n # get notification data\r\n if item_type == \"post\":\r\n liked_object = Post.objects.get(id=item_id)\r\n elif item_type == \"comment\":\r\n liked_object = Comment.objects.get(id=item_id)\r\n target = liked_object.author if item_type != \"user\" else liked_object\r\n\r\n # user must be authenticated to like/unlike\r\n if request.user.is_authenticated:\r\n like = Like.objects.filter(item_id=item_id, item_type=item_type, user=request.user)\r\n if like.exists():\r\n # unlike\r\n like.delete()\r\n # delete notification\r\n try:\r\n Notification.objects.get(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"like\",\r\n object_id=liked_object.id,\r\n object_type=item_type,\r\n target_id=target.id,\r\n target_type=\"user\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n else:\r\n # like\r\n like = Like.objects.create(item_id=item_id, item_type=item_type, user=request.user)\r\n # create notification\r\n # NB: users should not be notified of their actions on objects they created\r\n if like.user != target:\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"like\",\r\n object_id=liked_object.id,\r\n 
object_type=item_type,\r\n target_id=target.id,\r\n target_type=\"user\"\r\n )\r\n data['auth'] = True\r\n else: # anonymous user\r\n data['auth'] = False\r\n return JsonResponse(data)\r\n\r\n\r\n# follow or unfollow users\r\ndef follow(request):\r\n action = request.POST.get('action') # follow/unfollow\r\n followed_user_id = request.POST.get('followedUserId')\r\n followed_user = User.objects.get(id=followed_user_id)\r\n\r\n # users cannot follow themselves\r\n if followed_user == request.user:\r\n return JsonResponse({})\r\n\r\n # user must be authenticated to follow/unfollow\r\n if request.user.is_authenticated():\r\n if action == 'follow':\r\n followed_user.profile.followers.add(request.user)\r\n request.user.profile.following.add(followed_user)\r\n # create notification\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"follow\",\r\n object_id=followed_user.id,\r\n object_type=\"user\",\r\n target_id=followed_user.id,\r\n target_type=\"user\"\r\n )\r\n elif action == 'unfollow':\r\n followed_user.profile.followers.remove(request.user)\r\n request.user.profile.following.remove(followed_user)\r\n try:\r\n Notification.objects.get(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"follow\",\r\n object_id=followed_user.id,\r\n object_type=\"user\",\r\n target_id=followed_user.id,\r\n target_type=\"user\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n data['auth'] = True\r\n else:\r\n data['auth'] = False\r\n return JsonResponse(data)\r\n\r\n\r\ndef delete(request):\r\n item_id = request.POST.get('itemId')\r\n item_type = request.POST.get('itemType')\r\n\r\n if item_type == 'post':\r\n item = Post.objects.get(id=item_id)\r\n messages.success(request, \"Post deleted successfully!\")\r\n # delete notifications associated with this post\r\n try:\r\n Notification.objects.filter(\r\n object_id=item.id,\r\n object_type=\"post\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n 
pass\r\n elif item_type == 'comment':\r\n item = Comment.objects.get(id=item_id)\r\n messages.success(request, \"Comment deleted successfully!\")\r\n # delete notifications associated with this comment\r\n try:\r\n Notification.objects.get(\r\n object_id=item.id,\r\n object_type=\"comment\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n\r\n if item.author == request.user:\r\n item.delete()\r\n data['error'] = False\r\n return JsonResponse(data)\r\n\r\n\r\ndef comment(request):\r\n if request.user.is_authenticated():\r\n data['auth'] = True;\r\n form = CommentForm(request.POST)\r\n if form.is_valid():\r\n post_id = request.POST.get('post_id')\r\n content = request.POST.get('content')\r\n page = request.POST.get('page')\r\n post = Post.objects.get(id=post_id)\r\n comment = Comment.objects.create(content=content, post=post, author=request.user)\r\n show_comment_actions = True if page == \"post\" else False \r\n comment_html = loader.render_to_string(\r\n 'social/partials/latest-comment.html', {\r\n 'comment': comment, \r\n 'current_user': request.user, \r\n 'show_comment_actions': show_comment_actions\r\n },\r\n )\r\n data['comment_html'] = comment_html\r\n data['errors'] = False\r\n # create notification\r\n if post.author != comment.author:\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"comment\",\r\n object_id=comment.id,\r\n object_type=\"comment\",\r\n target_id=post.author.id,\r\n target_type=\"user\"\r\n )\r\n else:\r\n data['errors'] = form.errors\r\n else:\r\n data['auth'] = False\r\n \r\n return JsonResponse(data)\r\n\r\n\r\ndef clear_image(request):\r\n item_id = int(request.POST.get('itemId'))\r\n item_type = request.POST.get('itemType')\r\n\r\n if item_type == 'post':\r\n Post.objects.get(id=item_id, author=request.user).featured_image.delete(save=True)\r\n elif item_type == 'user' and item_id == request.user.id:\r\n 
User.objects.get(id=item_id).profile.profile_photo.delete(save=True)\r\n\r\n messages.success(request, 'Image successfully removed!')\r\n return JsonResponse(data)\r\n\r\n\r\n#### LAZY LOADING ####\r\n######################\r\n\r\n# META\r\ndef paginate_list(input_list, page, results_per_page=10):\r\n paginator = Paginator(input_list, results_per_page)\r\n # paginate\r\n try:\r\n output_list = paginator.page(page)\r\n except PageNotAnInteger:\r\n # If page is not an integer, deliver 2nd page.\r\n output_list = paginator.page(2)\r\n except EmptyPage:\r\n # If page is out of range (e.g. 9999), return empty list\r\n output_list = []\r\n # push to template\r\n return output_list\r\n\r\n\r\ndef load_feeds(request):\r\n page = request.POST.get('page')\r\n\r\n posts = c.feed(request.user)\r\n posts = paginate_list(posts, page, 15)\r\n posts_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = posts.has_next()\r\n data['list_html'] = posts_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_user_lists(request):\r\n user_list = request.POST.get('userList') # posts, following, followers, liked posts\r\n user_id = request.POST.get('userId')\r\n page = request.POST.get('page')\r\n user = User.objects.get(id=user_id)\r\n\r\n if user_list == 'posts':\r\n posts = user.profile.get_posts(request.user)\r\n posts = paginate_list(posts, page)\r\n posts_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = posts.has_next()\r\n data['list_html'] = posts_html\r\n elif user_list == 'following':\r\n following = list(reversed(user.profile.following.all()))\r\n following = paginate_list(following, page)\r\n following_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': following, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n 
data['has_next'] = following.has_next()\r\n data['list_html'] = following_html\r\n elif user_list == 'followers':\r\n followers = list(reversed(user.profile.followers.all()))\r\n followers = paginate_list(followers, page)\r\n followers_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': followers, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = followers.has_next()\r\n data['list_html'] = followers_html\r\n elif user_list == 'liked':\r\n liked_posts = c.liked(request.user)\r\n liked_posts = paginate_list(liked_posts, page)\r\n liked_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = liked_posts.has_next()\r\n data['list_html'] = liked_html\r\n return JsonResponse(data)\r\n\r\n \r\ndef load_comments(request):\r\n post_id = request.POST.get('postId')\r\n page = request.POST.get('page')\r\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\r\n comments = paginate_list(comments, page)\r\n comments_html = loader.render_to_string(\r\n 'social/partials/comments.html',\r\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = comments.has_next()\r\n data['comments_html'] = comments_html\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_popular(request):\r\n page = request.POST.get('page')\r\n\r\n popular_posts = c.popular(request.user)\r\n popular_posts = paginate_list(popular_posts, page, 15)\r\n popular_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': popular_posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = popular_posts.has_next()\r\n data['list_html'] = popular_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_users(request):\r\n page = request.POST.get('page')\r\n\r\n users = c.popular_users(request.user)\r\n users = paginate_list(users, 
page, 15)\r\n users_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = users.has_next()\r\n data['list_html'] = users_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_search_results(request):\r\n q = request.POST.get('q')\r\n page = request.POST.get('page')\r\n results = watson.search(q)\r\n results = paginate_list(results, page)\r\n results_html = loader.render_to_string(\r\n 'social/partials/search-results.html',\r\n {'results': results},\r\n )\r\n data['has_next'] = results.has_next()\r\n data['results_html'] = results_html\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_notifications(request):\r\n page = request.POST.get('page')\r\n notifs = Notification.objects.filter(target_type=\"user\", target_id=request.user.id).order_by('-created_at')\r\n notifs = paginate_list(notifs, page)\r\n notifications = []\r\n for n in notifs:\r\n notif = Notify(n)\r\n notification = notif.get()\r\n notifications.append({'message': notification, 'date': n.created_at})\r\n # mark unread notification as read\r\n if n.is_read == False:\r\n n.is_read = True\r\n n.save()\r\n\r\n notifs_html = loader.render_to_string(\r\n 'social/partials/notifications.html',\r\n {'notifications': notifications},\r\n )\r\n data['has_next'] = notifs.has_next()\r\n data['notifs_html'] = notifs_html\r\n return JsonResponse(data)",
"step-ids": [
6,
9,
11,
12,
16
]
}
|
[
6,
9,
11,
12,
16
] |
import numpy as np
import yaml
import pickle
import os
from flask import Flask, request, jsonify, render_template, redirect, url_for, flash
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, EqualTo
from wtforms.validators import InputRequired, Email, Length
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
app = Flask(__name__)
# Pre-trained gradient-boosting classifier served by /predict_model.
# NOTE(review): pickle.load executes arbitrary code from the file --
# model_GB.pkl must be a trusted artifact shipped with the app.
model = pickle.load(open('model_GB.pkl', 'rb'))
# Selects the configuration branch below: 'dev' reads config.yml,
# anything else ('prod') reads environment variables.
ENV = 'prod'
def get_config(fname):
    """Load and return the YAML configuration mapping from *fname*.

    The file holds environment-specific settings (DB credentials, mail
    account, secret key) consumed by the startup code below.
    """
    with open(fname) as config_file:
        return yaml.load(config_file, Loader=yaml.SafeLoader)
# Environment-specific Flask / Flask-Mail / SQLAlchemy configuration.
if ENV == 'dev':
    # Development: everything comes from the local YAML file.
    cfg = get_config('config.yml')
    connection = cfg['connection'][ENV]
    app.config['SECRET_KEY'] = connection['secret_key']
    app.debug = True
    # NOTE(review): this stores the password under the *username* key --
    # looks intentional for this config layout, but verify.
    app.config[connection['username']] = connection['password']
    app.config['TESTING'] = False
    app.config['MAIL_SERVER'] = 'smtp.gmail.com'
    app.config['MAIL_PORT'] = 25
    app.config['MAIL_USE_TLS'] = True
    # BUG FIX: the key was misspelled 'MAIL__USE_SSL' (double underscore),
    # so Flask-Mail never saw the setting and fell back to its default.
    app.config['MAIL_USE_SSL'] = False
    app.config['MAIL_USERNAME'] = connection['mail_user']
    app.config['MAIL_PASSWORD'] = connection['mail_pass']
    app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'
    app.config['MAIL_MAX_EMAILS'] = None
    app.config['MAIL_ASCII_ATTACHMENTS'] = False
else:
    # Production: secrets come from environment variables only.
    app.debug = False
    app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
    app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']
    app.config['MAIL_PORT'] = 25
    app.config['MAIL_USE_TLS'] = False
    # BUG FIX: same 'MAIL__USE_SSL' typo as the dev branch.
    app.config['MAIL_USE_SSL'] = False
    app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
    app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Wire up the Flask extensions; must run after app.config is populated.
mail = Mail(app)
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
# Endpoint that @login_required redirects anonymous users to.
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
    """Application user account backed by the user table."""

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(15), unique=True)
    email = db.Column(db.String(50), unique=True)
    # Holds the salted hash (sha256, ~80 chars), never the plain password.
    password = db.Column(db.String(80))

    def get_reset_token(self, expires_seconds = 1800):
        """Return a signed, time-limited token for password-reset e-mails.

        The token embeds this user's id and expires after
        *expires_seconds* (default 30 minutes).
        """
        s = Serializer(app.config['SECRET_KEY'], expires_seconds)
        return s.dumps({'user_id' : self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in *token*, or None if invalid/expired."""
        s = Serializer(app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except Exception:
            # Bad signature or expired token.
            return None
        # BUG FIX: the model class is ``User``; the previous lowercase
        # ``user`` was an undefined name and raised NameError, breaking
        # every password-reset link.
        return User.query.get(user_id)
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the session-stored id back to a User row.
    return User.query.get(int(user_id))
class LoginForm(FlaskForm):
    """Login form: username, password and a remember-me flag."""
    username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])
    password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])
    # When checked, the session cookie survives browser restarts.
    remember = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
    """Sign-up form; rejects usernames and e-mails that are already taken."""

    email = StringField('email', validators=[InputRequired(), Email(message='Invalid Email'), Length(max=50)])
    username = StringField('UserName', validators=[InputRequired(), Length(min=4, max=15)])
    password = PasswordField('Password', validators=[InputRequired(), Length(min=8, max=80)])

    def validate_username(self, username):
        """Reject registration when the requested username already exists."""
        existing = User.query.filter_by(username=username.data).first()
        if existing is not None:
            raise ValidationError('Username Taken')

    def validate_email(self, email):
        """Reject registration when the requested e-mail already exists."""
        existing = User.query.filter_by(email=email.data).first()
        if existing is not None:
            raise ValidationError('Email Taken')
class UpdateAccountForm(FlaskForm):
    """Profile-edit form; keeping one's own username/e-mail is allowed."""

    email = StringField('email', validators=[InputRequired(), Email(message='Invalid Email'), Length(max=50)])
    username = StringField('UserName', validators=[InputRequired(), Length(min=4, max=15)])
    submit = SubmitField('Update')

    def validate_username(self, username):
        """Complain only when the new username belongs to someone else."""
        if username.data == current_user.username:
            return
        if User.query.filter_by(username=username.data).first() is not None:
            raise ValidationError('Username Taken')

    def validate_email(self, email):
        """Complain only when the new e-mail belongs to someone else."""
        if email.data == current_user.email:
            return
        if User.query.filter_by(email=email.data).first() is not None:
            raise ValidationError('Email Taken')
class RequestResetForm(FlaskForm):
    """Form asking for the e-mail address to send a reset link to."""

    email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])
    submit = SubmitField('Request Password Reset')

    def validate_email(self, email):
        """Require the e-mail to belong to a registered account.

        BUG FIX: the old code first compared against ``current_user.email``,
        but this form is only reachable by anonymous users (the view
        redirects authenticated ones away) and Flask-Login's anonymous user
        has no ``email`` attribute, so validation raised AttributeError.
        Also fixes the 'accouunt' typo in the error message.
        """
        user = User.query.filter_by(email = email.data).first()
        if user is None:
            raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
    """New-password form shown after following a valid reset link."""
    password = PasswordField('Password', validators = [DataRequired()])
    # Must match ``password`` exactly.
    confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])
    submit = SubmitField('Reset Password')
@app.route('/',methods=['GET', 'POST'])
def home():
    # Landing page.
    return render_template('index.html')
@app.route('/error/')
def error():
    # Generic error page.
    return render_template('error.html')
@app.route('/login_error/')
def login_error():
    # Shown when login credentials are rejected.
    return render_template('login_error.html')
@app.route('/login/',methods=['GET', 'POST'])
def login():
    """Authenticate a user and start a session.

    GET renders the form; POST checks the credentials. Both an unknown
    username and a wrong password land on the login-error page, so the
    response does not reveal which part was wrong.
    """
    if current_user.is_authenticated:
        # Already logged in -- nothing to do here.
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username = form.username.data).first()
        if user and check_password_hash(user.password, form.password.data):
            login_user(user, remember = form.remember.data)
            # BUG FIX: the message used to say "Account Created For ..." --
            # a copy/paste from the signup view.
            flash('Logged in as {}!'.format(form.username.data))
            return redirect(url_for('model_page'))
        # BUG FIX: an unknown username previously fell through and silently
        # re-rendered the form; treat it the same as a wrong password.
        return redirect(url_for('login_error'))
    return render_template('login.html', form=form)
@app.route('/signup/', methods = ['GET','POST'])
def signup():
    """Register a new account.

    GET renders an empty form; a valid POST stores the user (password
    hashed) and redirects to the login page; an invalid POST re-renders
    the form with a duplicate-account message.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegisterForm()
    if form.validate_on_submit():
        # sha256 produces a hash that fits the 80-char password column.
        hashed_password = generate_password_hash(form.password.data, method = 'sha256')
        new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)
        db.session.add(new_user)
        db.session.commit()
        return redirect(url_for('login'))
    if request.method == 'POST':
        # BUG FIX: the old else-branch also fired on plain GET requests, so
        # the empty form was shown with a misleading "already exists"
        # message and the final return below was unreachable.
        return render_template('signup.html', form = form, message= 'Username / Email Already Exists')
    return render_template('signup.html', form = form)
@app.route('/logout/')
@login_required
def logout():
    # End the session and return the user to the landing page.
    logout_user()
    return redirect(url_for('home'))
@app.route('/learn_more/',methods=['GET', 'POST'])
def learn_more():
    # Static informational page.
    return render_template('learn_more.html')
@app.route('/email_sent/',methods=['GET', 'POST'])
def email_sent():
    # Confirmation page shown after an e-mail has been dispatched.
    return render_template('email_sent.html')
@app.route('/account/',methods=['GET', 'POST'])
@login_required
def account():
    """Show and update the logged-in user's profile (username/e-mail)."""
    form = UpdateAccountForm()
    if form.validate_on_submit():
        # Persist the edited fields, then redirect so a browser refresh
        # does not resubmit the form (post/redirect/get).
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('Your account has been updated', 'success')
        return redirect(url_for('account'))
    if request.method == 'GET':
        # Pre-fill the form with the current values.
        form.username.data = current_user.username
        form.email.data = current_user.email
    return render_template('account.html', title='Account', form=form)
@app.route('/model_page/', methods = ['GET','POST'])
@login_required
def model_page():
    # Prediction input form; results are produced by /predict_model.
    return render_template('model_page.html')
def send_reset_email(user):
    """E-mail *user* a time-limited password-reset link.

    The token in the link is produced by ``User.get_reset_token`` and
    expires after 30 minutes.
    """
    token = user.get_reset_token()
    msg = Message(subject = 'Password Reset Request',
                sender = 'noreply@syndicate.com',
                recipients=[user.email])
    # NOTE: the literal's leading spaces/line breaks are part of the
    # e-mail body exactly as written here.
    msg.body = f''' To reset your password, visit the following link :
    {url_for('reset_token', token = token, _external = True)}
    If you did not make this request then simply ignore this email and no changes will be made.
    '''
    mail.send(msg)
@app.route('/reset_password/',methods=['GET', 'POST'])
def reset_request():
    """Ask for an e-mail address and send a password-reset link to it."""
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RequestResetForm()
    if form.validate_on_submit():
        # The form validator already guarantees this account exists.
        user = User.query.filter_by(email = form.email.data).first()
        # BUG FIX: the reset e-mail was never actually sent.
        send_reset_email(user)
        # BUG FIX: ``flask(...)`` was an undefined name (NameError); the
        # function is ``flash``. Also fixes the 'resset' typo.
        flash('An email has been sent with instructions to reset your password', 'info')
        return redirect(url_for('login'))
    # BUG FIX: title typo 'Rest Password' -> 'Reset Password'.
    return render_template('reset_request.html', title='Reset Password', form=form)
@app.route('/reset_password/<token>',methods=['GET', 'POST'])
def reset_token(token):
    """Validate reset *token* and let the user set a new password.

    Invalid or expired tokens redirect back to the request form with a
    warning; a successful reset redirects to the login page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    user = User.verify_reset_token(token)
    if user is None:
        flash('That is an invalid / expired token', 'warning')
        return redirect(url_for('reset_request'))
    form = ResetPasswordForm()
    if form.validate_on_submit():
        # sha256 produces a hash that fits the 80-char password column.
        hashed_password = generate_password_hash(form.password.data, method = 'sha256')
        user.password = hashed_password
        db.session.commit()
        flash('Your password has been updated!', 'success')
        return redirect(url_for('login'))
    # BUG FIX: title typo 'Rest Password' -> 'Reset Password'; dead
    # commented-out mail code removed.
    return render_template('reset_token.html', title='Reset Password', form=form)
@app.route('/predict_model', methods=['GET', 'POST'])
def predict_model():
    """Run the pickled classifier on the submitted form values.

    Every form field is read as an integer feature; the predicted class
    id is mapped to a Toronto borough name for display.
    """
    int_features = [int(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(prediction[0], 2)
    # Class-id -> borough label. NOTE(review): only ids 1/3/4/6 are mapped;
    # assumes the model emits only these classes -- TODO confirm.
    map_dict = {1: 'DT Toronto', 3: 'North York', 4: 'Scarborough', 6: 'Etobicoke'}
    # BUG FIX: ``map_dict[output]`` raised KeyError (HTTP 500) for any
    # unmapped class id; fall back to a safe label instead.
    output = map_dict.get(output, 'Unknown')
    return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))
if __name__ == "__main__":
    # Script entry point. In production, run with the app's configured
    # defaults (debug was already disabled on the app object above);
    # otherwise start the development server with the interactive debugger.
    if ENV == 'prod':
        app.run()
    else:
        app.run(debug=True)
|
normal
|
{
"blob_id": "f6a3693fe81e629d987067265bf4e410bf260bcf",
"index": 1663,
"step-1": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True)\n email = db.Column(db.String(50), unique=True)\n password = db.Column(db.String(80))\n\n def get_reset_token(self, expires_seconds=1800):\n s = Serializer(app.config['SECRET_KEY'], expires_seconds)\n return s.dumps({'user_id': self.id}).decode('utf-8')\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n return None\n return user.query.get(user_id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass LoginForm(FlaskForm):\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n remember = BooleanField('Remember Me')\n\n\nclass RegisterForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass UpdateAccountForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', 
validators=[InputRequired(), Length(\n min=4, max=15)])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass RequestResetForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user is None:\n raise ValidationError(\n 'There is no accouunt with that email. 
You must register first.'\n )\n\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[\n DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n flash('Account Created For {}!'.format(form.username.data))\n return redirect(url_for('model_page'))\n else:\n return redirect(url_for('login_error'))\n return render_template('login.html', form=form)\n\n\n<mask token>\n\n\n@app.route('/learn_more/', methods=['GET', 'POST'])\ndef learn_more():\n return render_template('learn_more.html')\n\n\n<mask token>\n\n\n@app.route('/model_page/', methods=['GET', 'POST'])\n@login_required\ndef model_page():\n return render_template('model_page.html')\n\n\ndef send_reset_email(user):\n token = user.get_reset_token()\n msg = Message(subject='Password Reset Request', sender=\n 'noreply@syndicate.com', recipients=[user.email])\n msg.body = f\"\"\" To reset your password, visit the following link :\n{url_for('reset_token', token=token, _external=True)}\n\nIf you did not make this request then simply ignore this email and no changes will be made.\n\"\"\"\n mail.send(msg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_config(fname):\n \"\"\"\n Creates connection to yaml file which holds the DB user and pass\n \"\"\"\n with open(fname) as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n return cfg\n\n\n<mask token>\n\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True)\n email = db.Column(db.String(50), unique=True)\n password = db.Column(db.String(80))\n\n def get_reset_token(self, expires_seconds=1800):\n s = Serializer(app.config['SECRET_KEY'], expires_seconds)\n return s.dumps({'user_id': self.id}).decode('utf-8')\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n return None\n return user.query.get(user_id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass LoginForm(FlaskForm):\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n remember = BooleanField('Remember Me')\n\n\nclass RegisterForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise 
ValidationError('Email Taken')\n\n\nclass UpdateAccountForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass RequestResetForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user is None:\n raise ValidationError(\n 'There is no accouunt with that email. 
You must register first.'\n )\n\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[\n DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n flash('Account Created For {}!'.format(form.username.data))\n return redirect(url_for('model_page'))\n else:\n return redirect(url_for('login_error'))\n return render_template('login.html', form=form)\n\n\n<mask token>\n\n\n@app.route('/learn_more/', methods=['GET', 'POST'])\ndef learn_more():\n return render_template('learn_more.html')\n\n\n<mask token>\n\n\n@app.route('/model_page/', methods=['GET', 'POST'])\n@login_required\ndef model_page():\n return render_template('model_page.html')\n\n\ndef send_reset_email(user):\n token = user.get_reset_token()\n msg = Message(subject='Password Reset Request', sender=\n 'noreply@syndicate.com', recipients=[user.email])\n msg.body = f\"\"\" To reset your password, visit the following link :\n{url_for('reset_token', token=token, _external=True)}\n\nIf you did not make this request then simply ignore this email and no changes will be made.\n\"\"\"\n mail.send(msg)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_config(fname):\n \"\"\"\n Creates connection to yaml file which holds the DB user and pass\n \"\"\"\n with open(fname) as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n return cfg\n\n\n<mask token>\n\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True)\n email = db.Column(db.String(50), unique=True)\n password = db.Column(db.String(80))\n\n def get_reset_token(self, expires_seconds=1800):\n s = Serializer(app.config['SECRET_KEY'], expires_seconds)\n return s.dumps({'user_id': self.id}).decode('utf-8')\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n return None\n return user.query.get(user_id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass LoginForm(FlaskForm):\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n remember = BooleanField('Remember Me')\n\n\nclass RegisterForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise 
ValidationError('Email Taken')\n\n\nclass UpdateAccountForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass RequestResetForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user is None:\n raise ValidationError(\n 'There is no accouunt with that email. 
You must register first.'\n )\n\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[\n DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return render_template('index.html')\n\n\n<mask token>\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n flash('Account Created For {}!'.format(form.username.data))\n return redirect(url_for('model_page'))\n else:\n return redirect(url_for('login_error'))\n return render_template('login.html', form=form)\n\n\n<mask token>\n\n\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route('/learn_more/', methods=['GET', 'POST'])\ndef learn_more():\n return render_template('learn_more.html')\n\n\n<mask token>\n\n\n@app.route('/model_page/', methods=['GET', 'POST'])\n@login_required\ndef model_page():\n return render_template('model_page.html')\n\n\ndef send_reset_email(user):\n token = user.get_reset_token()\n msg = Message(subject='Password Reset Request', sender=\n 'noreply@syndicate.com', recipients=[user.email])\n msg.body = f\"\"\" To reset your password, visit the following link :\n{url_for('reset_token', token=token, _external=True)}\n\nIf you did not make this request then simply ignore this email and no changes will be made.\n\"\"\"\n mail.send(msg)\n\n\n<mask token>\n\n\n@app.route('/reset_password/<token>', methods=['GET', 'POST'])\ndef reset_token(token):\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n user 
= User.verify_reset_token(token)\n if user is None:\n flash('That is an invalid / expired token', 'warning')\n return redirect(url_for('reset_request'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = generate_password_hash(form.password.data, method\n ='sha256')\n user.password = hashed_password\n db.session.commit()\n flash('Your password has been updated!', 'success')\n return redirect(url_for('login'))\n return render_template('reset_token.html', title='Rest Password', form=form\n )\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_config(fname):\n \"\"\"\n Creates connection to yaml file which holds the DB user and pass\n \"\"\"\n with open(fname) as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n return cfg\n\n\nif ENV == 'dev':\n cfg = get_config('config.yml')\n connection = cfg['connection'][ENV]\n app.config['SECRET_KEY'] = connection['secret_key']\n app.debug = True\n app.config[connection['username']] = connection['password']\n app.config['TESTING'] = False\n app.config['MAIL_SERVER'] = 'smtp.gmail.com'\n app.config['MAIL_PORT'] = 25\n app.config['MAIL_USE_TLS'] = True\n app.config['MAIL__USE_SSL'] = False\n app.config['MAIL_USERNAME'] = connection['mail_user']\n app.config['MAIL_PASSWORD'] = connection['mail_pass']\n app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'\n app.config['MAIL_MAX_EMAILS'] = None\n app.config['MAIL_ASCII_ATTACHMENTS'] = False\nelse:\n app.debug = False\n app.config['SECRET_KEY'] = os.environ['SECRET_KEY']\n app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']\n app.config['MAIL_PORT'] = 25\n app.config['MAIL_USE_TLS'] = False\n app.config['MAIL__USE_SSL'] = False\n app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']\n app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']\n app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\n<mask token>\nBootstrap(app)\n<mask token>\nlogin_manager.init_app(app)\n<mask token>\n\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True)\n email = db.Column(db.String(50), unique=True)\n password = db.Column(db.String(80))\n\n def get_reset_token(self, expires_seconds=1800):\n s = Serializer(app.config['SECRET_KEY'], expires_seconds)\n return s.dumps({'user_id': self.id}).decode('utf-8')\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n return None\n return 
user.query.get(user_id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass LoginForm(FlaskForm):\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n remember = BooleanField('Remember Me')\n\n\nclass RegisterForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n password = PasswordField('Password', validators=[InputRequired(),\n Length(min=8, max=80)])\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n user = User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass UpdateAccountForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n username = StringField('UserName', validators=[InputRequired(), Length(\n min=4, max=15)])\n submit = SubmitField('Update')\n\n def validate_username(self, username):\n \"\"\"\n Raises a validation error if a user tries to register using an existing username\n \"\"\"\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = 
User.query.filter_by(email=email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\n\nclass RequestResetForm(FlaskForm):\n email = StringField('email', validators=[InputRequired(), Email(message\n ='Invalid Email'), Length(max=50)])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n \"\"\"\n Raises a validation error if a user tries to register using an existing email\n \"\"\"\n if email.data != current_user.email:\n user = User.query.filter_by(email=email.data).first()\n if user is None:\n raise ValidationError(\n 'There is no accouunt with that email. You must register first.'\n )\n\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators=[DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators=[\n DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n return render_template('index.html')\n\n\n@app.route('/error/')\ndef error():\n return render_template('error.html')\n\n\n@app.route('/login_error/')\ndef login_error():\n return render_template('login_error.html')\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n flash('Account Created For {}!'.format(form.username.data))\n return redirect(url_for('model_page'))\n else:\n return redirect(url_for('login_error'))\n return render_template('login.html', form=form)\n\n\n@app.route('/signup/', methods=['GET', 'POST'])\ndef signup():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegisterForm()\n if form.validate_on_submit():\n hashed_password = 
generate_password_hash(form.password.data, method\n ='sha256')\n new_user = User(username=form.username.data, email=form.email.data,\n password=hashed_password)\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('login'))\n else:\n return render_template('signup.html', form=form, message=\n 'Username / Email Already Exists')\n return render_template('signup.html', form=form)\n\n\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route('/learn_more/', methods=['GET', 'POST'])\ndef learn_more():\n return render_template('learn_more.html')\n\n\n@app.route('/email_sent/', methods=['GET', 'POST'])\ndef email_sent():\n return render_template('email_sent.html')\n\n\n@app.route('/account/', methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdateAccountForm()\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n return render_template('account.html', title='Account', form=form)\n\n\n@app.route('/model_page/', methods=['GET', 'POST'])\n@login_required\ndef model_page():\n return render_template('model_page.html')\n\n\ndef send_reset_email(user):\n token = user.get_reset_token()\n msg = Message(subject='Password Reset Request', sender=\n 'noreply@syndicate.com', recipients=[user.email])\n msg.body = f\"\"\" To reset your password, visit the following link :\n{url_for('reset_token', token=token, _external=True)}\n\nIf you did not make this request then simply ignore this email and no changes will be made.\n\"\"\"\n mail.send(msg)\n\n\n@app.route('/reset_password/', methods=['GET', 'POST'])\ndef reset_request():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n 
form = RequestResetForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n flask(\n 'An email has been sent with instructions to resset your password',\n 'info')\n return redirect(url_for('login'))\n return render_template('reset_request.html', title='Rest Password',\n form=form)\n\n\n@app.route('/reset_password/<token>', methods=['GET', 'POST'])\ndef reset_token(token):\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n user = User.verify_reset_token(token)\n if user is None:\n flash('That is an invalid / expired token', 'warning')\n return redirect(url_for('reset_request'))\n form = ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = generate_password_hash(form.password.data, method\n ='sha256')\n user.password = hashed_password\n db.session.commit()\n flash('Your password has been updated!', 'success')\n return redirect(url_for('login'))\n return render_template('reset_token.html', title='Rest Password', form=form\n )\n\n\n@app.route('/predict_model', methods=['GET', 'POST'])\ndef predict_model():\n int_features = [int(x) for x in request.form.values()]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n output = round(prediction[0], 2)\n map_dict = {(1): 'DT Toronto', (3): 'North York', (4): 'Scarborough', (\n 6): 'Etobicoke'}\n output = map_dict[output]\n return render_template('model_page.html', prediction_text=\n 'The Crime Occurred in : {}'.format(output))\n\n\nif __name__ == '__main__':\n if ENV == 'prod':\n app.run()\n else:\n app.run(debug=True)\n",
"step-5": "import numpy as np\nimport yaml\nimport pickle\nimport os\n\nfrom flask import Flask, request, jsonify, render_template, redirect, url_for, flash\nfrom flask_mail import Mail, Message\nfrom flask_wtf import FlaskForm\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField\nfrom wtforms.validators import ValidationError, DataRequired, EqualTo\nfrom wtforms.validators import InputRequired, Email, Length\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n\n\napp = Flask(__name__)\nmodel = pickle.load(open('model_GB.pkl', 'rb'))\n\nENV = 'prod'\n\ndef get_config(fname):\n '''\n Creates connection to yaml file which holds the DB user and pass\n '''\n with open(fname) as f:\n cfg = yaml.load(f, Loader=yaml.SafeLoader)\n return cfg\n\nif ENV == 'dev':\n\n cfg = get_config('config.yml')\n connection = cfg['connection'][ENV]\n app.config['SECRET_KEY'] = connection['secret_key']\n app.debug = True\n app.config[connection['username']] = connection['password']\n\n app.config['TESTING'] = False\n app.config['MAIL_SERVER'] = 'smtp.gmail.com'\n app.config['MAIL_PORT'] = 25\n app.config['MAIL_USE_TLS'] = True\n app.config['MAIL__USE_SSL'] = False\n app.config['MAIL_USERNAME'] = connection['mail_user']\n app.config['MAIL_PASSWORD'] = connection['mail_pass']\n app.config['MAIL_DEFAULT_SENDER'] = 'mail@syndicate.com'\n app.config['MAIL_MAX_EMAILS'] = None\n app.config['MAIL_ASCII_ATTACHMENTS'] = False\n\nelse:\n app.debug = False\n app.config['SECRET_KEY'] = os.environ['SECRET_KEY']\n app.config['MAIL_SERVER'] = os.environ['MAIL_SERVER']\n app.config['MAIL_PORT'] = 25\n app.config['MAIL_USE_TLS'] = False\n app.config['MAIL__USE_SSL'] = False\n app.config['MAIL_USERNAME'] = 
os.environ['MAIL_USERNAME']\n app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']\n\n app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\nmail = Mail(app)\nBootstrap(app)\ndb = SQLAlchemy(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\n\nclass User(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(15), unique=True)\n email = db.Column(db.String(50), unique=True)\n password = db.Column(db.String(80))\n\n def get_reset_token(self, expires_seconds = 1800):\n s = Serializer(app.config['SECRET_KEY'], expires_seconds)\n return s.dumps({'user_id' : self.id}).decode('utf-8')\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n return None\n return user.query.get(user_id)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass LoginForm(FlaskForm):\n username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])\n password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])\n remember = BooleanField('Remember Me')\n\nclass RegisterForm(FlaskForm):\n email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])\n username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])\n password = PasswordField('Password', validators = [InputRequired(), Length(min = 8, max = 80)])\n\n def validate_username(self, username):\n '''\n Raises a validation error if a user tries to register using an existing username\n '''\n user = User.query.filter_by(username = username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n '''\n Raises a validation error if a user 
tries to register using an existing email\n '''\n user = User.query.filter_by(email = email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\nclass UpdateAccountForm(FlaskForm):\n email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])\n username = StringField('UserName', validators = [InputRequired(), Length(min = 4, max = 15)])\n\n submit = SubmitField('Update')\n def validate_username(self, username):\n '''\n Raises a validation error if a user tries to register using an existing username\n '''\n if username.data != current_user.username:\n user = User.query.filter_by(username = username.data).first()\n if user:\n raise ValidationError('Username Taken')\n\n def validate_email(self, email):\n '''\n Raises a validation error if a user tries to register using an existing email\n '''\n if email.data != current_user.email:\n user = User.query.filter_by(email = email.data).first()\n if user:\n raise ValidationError('Email Taken')\n\nclass RequestResetForm(FlaskForm):\n email = StringField('email', validators = [InputRequired(), Email(message = 'Invalid Email'), Length(max = 50)])\n submit = SubmitField('Request Password Reset')\n\n def validate_email(self, email):\n '''\n Raises a validation error if a user tries to register using an existing email\n '''\n if email.data != current_user.email:\n user = User.query.filter_by(email = email.data).first()\n if user is None:\n raise ValidationError('There is no accouunt with that email. 
You must register first.')\n\nclass ResetPasswordForm(FlaskForm):\n password = PasswordField('Password', validators = [DataRequired()])\n confirm_password = PasswordField('Confirm Password', validators = [DataRequired(), EqualTo('password')])\n submit = SubmitField('Reset Password')\n\n\n\n\n@app.route('/',methods=['GET', 'POST'])\ndef home():\n return render_template('index.html')\n\n@app.route('/error/')\ndef error():\n return render_template('error.html')\n\n@app.route('/login_error/')\ndef login_error():\n return render_template('login_error.html')\n\n@app.route('/login/',methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(username = form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember = form.remember.data)\n flash('Account Created For {}!'.format(form.username.data))\n return redirect(url_for('model_page'))\n else:\n return redirect(url_for('login_error'))\n\n return render_template('login.html', form=form)\n\n@app.route('/signup/', methods = ['GET','POST'])\ndef signup():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = RegisterForm()\n\n if form.validate_on_submit():\n hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long\n new_user = User(username = form.username.data, email = form.email.data, password = hashed_password)\n db.session.add(new_user)\n db.session.commit()\n\n # send congrat email for registering\n # msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get(\"MAIL_USERNAME\"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')\n # mail.send(msg)\n\n return redirect(url_for('login'))\n else:\n return 
render_template('signup.html', form = form, message= 'Username / Email Already Exists')\n # return '<h1>' + form.email.data + ' ' + form.username.data + ' ' + form.password.data + '<h1>'\n return render_template('signup.html', form = form)\n\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n@app.route('/learn_more/',methods=['GET', 'POST'])\ndef learn_more():\n return render_template('learn_more.html')\n\n@app.route('/email_sent/',methods=['GET', 'POST'])\ndef email_sent():\n return render_template('email_sent.html')\n\n@app.route('/account/',methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdateAccountForm()\n\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your account has been updated', 'success')\n return redirect(url_for('account'))\n\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n\n return render_template('account.html', title = 'Account', form = form)\n\n@app.route('/model_page/', methods = ['GET','POST'])\n@login_required\ndef model_page():\n return render_template('model_page.html')\n\ndef send_reset_email(user):\n token = user.get_reset_token()\n msg = Message(subject = 'Password Reset Request',\n sender = 'noreply@syndicate.com',\n recipients=[user.email])\n msg.body = f''' To reset your password, visit the following link :\n{url_for('reset_token', token = token, _external = True)}\n\nIf you did not make this request then simply ignore this email and no changes will be made.\n'''\n mail.send(msg)\n\n\n@app.route('/reset_password/',methods=['GET', 'POST'])\ndef reset_request():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n form = RequestResetForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email = form.email.data).first()\n flask('An email has been sent with 
instructions to resset your password', 'info')\n return redirect(url_for('login'))\n\n return render_template('reset_request.html', title = 'Rest Password', form = form)\n\n@app.route('/reset_password/<token>',methods=['GET', 'POST'])\ndef reset_token(token):\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n\n user = User.verify_reset_token(token)\n if user is None:\n flash('That is an invalid / expired token', 'warning')\n return redirect(url_for('reset_request'))\n\n form = ResetPasswordForm()\n if form.validate_on_submit():\n hashed_password = generate_password_hash(form.password.data, method = 'sha256') # sha256 will generate a hash which is 80 chars long\n user.password = hashed_password\n db.session.commit()\n flash('Your password has been updated!', 'success')\n # send congrat email for registering\n # msg = Message(subject = 'Welcome {}'.format(form.username.data), sender = app.config.get(\"MAIL_USERNAME\"), recipients = [str(form.email.data)], body = 'Congratulations you have signed up and your account has been created!')\n # mail.send(msg)\n return redirect(url_for('login'))\n return render_template('reset_token.html', title = 'Rest Password', form = form)\n\n\n\n@app.route('/predict_model', methods=['GET', 'POST'])\ndef predict_model():\n int_features = [int(x) for x in request.form.values()]\n final_features = [np.array(int_features)]\n prediction = model.predict(final_features)\n\n output = round(prediction[0], 2)\n map_dict = {1 : 'DT Toronto', 3 : 'North York', 4 : 'Scarborough', 6 : 'Etobicoke'}\n output = map_dict[output]\n return render_template('model_page.html', prediction_text = 'The Crime Occurred in : {}'.format(output))\n\nif __name__ == \"__main__\":\n if ENV == 'prod':\n app.run()\n else:\n app.run(debug=True)\n",
"step-ids": [
25,
26,
28,
36,
39
]
}
|
[
25,
26,
28,
36,
39
] |
<|reserved_special_token_0|>
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
<|reserved_special_token_0|>
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
<|reserved_special_token_1|>
import os
from pathlib import Path
import Algorithmia
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in
fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in
fnames], 'filterName': filter_name}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
<|reserved_special_token_1|>
import os
from pathlib import Path
import Algorithmia
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {
"images": [ORIGINAL_DATA_DIR + Path(im).name for im in fnames],
"savePaths": [TRANSFERD_DATA_DIR + Path(im).name for im in fnames],
"filterName": filter_name
}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
|
flexible
|
{
"blob_id": "2536b22c2d154e87bdecb72cc967d8c56ddb73fb",
"index": 609,
"step-1": "<mask token>\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\n<mask token>\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-2": "<mask token>\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-3": "<mask token>\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-4": "import os\nfrom pathlib import Path\nimport Algorithmia\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-5": "import os\nfrom pathlib import Path\n\nimport Algorithmia\n\n\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n\n upload(client, fnames)\n inputs = {\n \"images\": [ORIGINAL_DATA_DIR + Path(im).name for im in fnames],\n \"savePaths\": [TRANSFERD_DATA_DIR + Path(im).name for im in fnames],\n \"filterName\": filter_name\n }\n\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n\n download(client, out_folder)\n return result\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.
import scipy.ndimage as nd
import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils
logger = log_utils.get_logger("reward_learning/split_data_lmdb")
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,
batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)
data_dict_df.reset_state()
assert(split_ratio1 > 0)
assert(split_ratio1 < 1)
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write("Data split will result in empty data set\n")
sys.exit(1)
logger.info("Splitting {} samples into {} train and {} test samples".format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn("Dropping {} samples from input dataset".format(num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)
with logged_time_measurement(logger, "Writing train dataset to {} ...".format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, "Writing test dataset to {} ...".format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg,
reset_df_state=False)
logger.info("Tagging as train and test")
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item("__train__", msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item("__test__", msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert(lmdb_df.size() == num_samples1)
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert(lmdb_df.size() == num_samples2)
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, "Computing data statistics for {}".format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))
# TODO: Hack to get rid of float64 in HDF5 dataset
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info("Writing data statistics to {}".format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item("__stats__", data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,
args.split_ratio1, args.batch_size,
args.shuffle, args.serialization,
args.compression, args.compression_arg,
args.max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count',
default=0, help='Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default="pickle")
parser.add_argument('--compression', type=str, default="lz4")
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help="Ratio of data to write to output path 1")
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a283fd1e4098ea8bb3cc3580438c90e5932ba22f",
"index": 5852,
"step-1": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n 
serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser 
= argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n 
serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser 
= argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n 
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n 
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport scipy.ndimage as nd\nimport argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n 
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', 
data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.\nimport scipy.ndimage as nd\n\nimport argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\n\n\nlogger = log_utils.get_logger(\"reward_learning/split_data_lmdb\")\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,\n batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)\n data_dict_df.reset_state()\n\n assert(split_ratio1 > 0)\n assert(split_ratio1 < 1)\n\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write(\"Data split will result in empty data set\\n\")\n sys.exit(1)\n\n logger.info(\"Splitting {} samples into {} train and {} test samples\".format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn(\"Dropping {} samples from input dataset\".format(num_samples - num_samples1 - num_samples2))\n\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)\n with 
logged_time_measurement(logger, \"Writing train dataset to {} ...\".format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg)\n\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, \"Writing test dataset to {} ...\".format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg,\n reset_df_state=False)\n\n logger.info(\"Tagging as train and test\")\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__train__\", msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__test__\", msgpack_utils.dumps(True))\n\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert(lmdb_df.size() == num_samples1)\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert(lmdb_df.size() == num_samples2)\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, \"Computing data statistics for {}\".format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))\n\n # TODO: Hack to get rid of float64 in HDF5 dataset\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)\n\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info(\"Writing data statistics to 
{}\".format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item(\"__stats__\", data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,\n args.split_ratio1, args.batch_size,\n args.shuffle, args.serialization,\n args.compression, args.compression_arg,\n args.max_num_samples)\n\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count',\n default=0, help='Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default=\"pickle\")\n parser.add_argument('--compression', type=str, default=\"lz4\")\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\"Ratio of data to write to output path 1\")\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n\n args = parser.parse_args()\n\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
imgpath1 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
imgpath2 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
titles = ['Pepper Gray', 'Peppers Color']
images = [img1, img2]
for i in range(2):
plt.subplot(1, 2, i + 1)
plt.imshow(images[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
imgpath1 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
imgpath2 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
img1 = cv2.imread(imgpath1, 1)
img2 = cv2.imread(imgpath2, 1)
titles = ['Pepper Gray', 'Peppers Color']
images = [img1, img2]
for i in range(2):
plt.subplot(1, 2, i + 1)
plt.imshow(images[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
def main():
    """Load two copies of the lena test image and display them side by side."""
    imgpath1 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
    imgpath2 = 'C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png'
    # Flag 1 == cv2.IMREAD_COLOR; cv2.imread returns None when the path is missing.
    # NOTE(review): cv2.imread yields BGR while plt.imshow expects RGB, so the
    # colors will appear channel-swapped — consider cv2.cvtColor before display.
    img1 = cv2.imread(imgpath1, 1)
    img2 = cv2.imread(imgpath2, 1)
    titles = ['Pepper Gray', 'Peppers Color']
    images = [img1, img2]
    for i in range(2):
        # 1-row x 2-column grid; subplot positions are 1-based.
        plt.subplot(1, 2, i + 1)
        plt.imshow(images[i])
        plt.title(titles[i])
        # Hide the axis tick marks for a cleaner image view.
        plt.xticks([])
        plt.yticks([])
    plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#Displaying multiple images using matplotlib
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
def main():
imgpath1="C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"
imgpath2="C:\Shreyas\OpenCv\DIP_OpenCV\lena.png"
img1=cv2.imread(imgpath1,1)
img2=cv2.imread(imgpath2,1)
titles = ['Pepper Gray', 'Peppers Color']
images = [img1, img2]
for i in range(2):
plt.subplot(1, 2, i+1)
plt.imshow(images[i])
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "2867a7b24b4911b2936cb34653fa57431c14d6a3",
"index": 7319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef main():\n imgpath1 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n imgpath2 = 'C:\\\\Shreyas\\\\OpenCv\\\\DIP_OpenCV\\\\lena.png'\n img1 = cv2.imread(imgpath1, 1)\n img2 = cv2.imread(imgpath2, 1)\n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n for i in range(2):\n plt.subplot(1, 2, i + 1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#Displaying multiple images using matplotlib\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\ndef main():\n \n imgpath1=\"C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png\"\n imgpath2=\"C:\\Shreyas\\OpenCv\\DIP_OpenCV\\lena.png\"\n \n img1=cv2.imread(imgpath1,1) \n img2=cv2.imread(imgpath2,1)\n \n titles = ['Pepper Gray', 'Peppers Color']\n images = [img1, img2]\n \n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.imshow(images[i])\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n\n plt.show() \n \nif __name__ == \"__main__\":\n main()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from datetime import datetime, timedelta
def sendmail(subject, template, to, context):
    """Render the app template named *template* with *context* and email it.

    Sends a multipart message to the *to* recipients: the rendered HTML
    plus a plain-text fallback produced by stripping the tags.
    """
    # Templates are resolved under app/<name>.html.
    rendered_html = render_to_string('app/' + template + '.html', {'data': context})
    # Plain-text alternative for mail clients that cannot render HTML.
    text_body = strip_tags(rendered_html)
    send_mail(subject, text_body, 'ridham.shah.aditi@gmail.com', to,
              html_message=rendered_html)
|
normal
|
{
"blob_id": "0349a8a4841b024afd77d20ae18810645fad41cd",
"index": 4883,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = 'ridham.shah.aditi@gmail.com'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-3": "from django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom datetime import datetime, timedelta\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = 'ridham.shah.aditi@gmail.com'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.db.models import Sum, Count
from django.db.models.functions import Coalesce
from django.utils.timezone import localtime
from .models import Quote, Vote
import pygal
from pygal.style import Style
# Shared pygal theme applied to every chart on the stats page.
style = Style(
    background='transparent',
    plot_background='transparent',
    foreground='#3d3d3d',
    foreground_strong='#303030',
    foreground_subtle='#939393',
    opacity='.8',
    opacity_hover='.9',
    colors=('#fa5555', '#888'),
    label_font_size=15,
    major_label_font_size=15,
    title_font_size=20,
    legend_font_size=15
)
# Abbreviated month names, indexed 0-11 (January == 0).
MONTHS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
# Approved quotes annotated with their vote total (0 when unvoted) and vote count.
quotes = Quote.objects.annotate(score=Coalesce(Sum('vote__value'), 0), votes=Count('vote')).filter(approved=True)
# Manager shortcut used by the vote-distribution chart.
votes = Vote.objects
class QuotesOverTime():
    """Cumulative line chart of how many quotes existed over time."""

    def __init__(self):
        self.chart = pygal.DateTimeLine(
            title='Quotes over Time',
            x_label_rotation=90,
            x_value_formatter=lambda dt: dt.strftime('%b %Y'),
            margin=20,
            show_legend=False,
            show_dots=False,
            fill=True,
            style=style
        )

    def pull(self):
        """Map each quote's unix timestamp to the number of quotes sharing it."""
        counts = {}
        for quote in quotes.order_by('timestamp'):
            ts = quote.timestamp.timestamp()
            counts[ts] = counts.get(ts, 0) + 1
        return counts

    def generate(self):
        """Render the chart as a unicode SVG string."""
        running = 0
        series = []
        # Each point carries the running total accumulated *before* its own
        # count, matching the original plotting behaviour.
        for ts, count in self.pull().items():
            series.append((ts, running))
            running += count
        self.chart.add('quotes', series)
        return self.chart.render(is_unicode=True)
class QuotesByHour():
    """Bar chart bucketing quotes by the local hour (0-23) they were added."""

    def __init__(self):
        self.chart = pygal.Bar(
            title='Quotes by Hour',
            x_labels=[str(hour) for hour in range(24)],
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Return a 24-slot list of quote counts indexed by local hour."""
        buckets = [0] * 24
        for quote in quotes:
            buckets[localtime(quote.timestamp).hour] += 1
        return buckets

    def generate(self):
        """Render the chart as a unicode SVG string."""
        self.chart.add('quotes', self.pull())
        return self.chart.render(is_unicode=True)
class QuotesByMonth():
    """Bar chart bucketing quotes by the local calendar month they were added."""

    def __init__(self):
        self.chart = pygal.Bar(
            title='Quotes by Month',
            x_labels=MONTHS,
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Return a 12-slot list of quote counts indexed by month (Jan == 0)."""
        buckets = [0] * 12
        for quote in quotes:
            # datetime months are 1-based; shift to match MONTHS indexing.
            buckets[localtime(quote.timestamp).month - 1] += 1
        return buckets

    def generate(self):
        """Render the chart as a unicode SVG string."""
        self.chart.add('quotes', self.pull())
        return self.chart.render(is_unicode=True)
class QuotesByRating():
    """Histogram of quotes grouped by their vote score."""

    def __init__(self):
        self.chart = pygal.Histogram(
            title='Quotes by Rating',
            margin=20,
            show_legend=False,
            style=style
        )

    def pull(self):
        """Map each score to the number of quotes holding it."""
        counts = {}
        for quote in quotes:
            counts[quote.score] = counts.get(quote.score, 0) + 1
        return counts

    def generate(self):
        """Render the chart as a unicode SVG string."""
        # pygal histogram values are (height, x_start, x_end) triples; each
        # score occupies a unit-wide bar from score to score + 1.
        bars = [(count, score, score + 1) for score, count in self.pull().items()]
        self.chart.add('quotes', bars)
        return self.chart.render(is_unicode=True)
class VoteDistribution():
    """Donut chart comparing the total number of up- and down-votes."""

    def __init__(self):
        self.chart = pygal.Pie(
            title='Vote Distribution',
            margin=20,
            inner_radius=.7,
            style=style
        )

    def pull(self):
        """Return {'up': <count>, 'down': <count>} vote totals."""
        return {
            'up': votes.filter(value=1).count(),
            'down': votes.filter(value=-1).count(),
        }

    def generate(self):
        """Render the chart as a unicode SVG string."""
        for label, count in self.pull().items():
            # Embed the raw count in the slice label, e.g. "up (42)".
            self.chart.add('{} ({})'.format(label, count), count)
        return self.chart.render(is_unicode=True)
|
normal
|
{
"blob_id": "6f6f57ff317d7e3c6e6ae4d450c6fdf0e22eb4eb",
"index": 7256,
"step-1": "<mask token>\n\n\nclass QuotesByMonth:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass QuotesByRating:\n\n def __init__(self):\n self.chart = pygal.Histogram(title='Quotes by Rating', margin=20,\n show_legend=False, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes:\n data[quote.score] = data.get(quote.score, 0)\n data[quote.score] += 1\n return data\n\n def generate(self):\n data = self.pull()\n bars = []\n for key, value in data.items():\n bars.append((value, key, key + 1))\n self.chart.add('quotes', bars)\n return self.chart.render(is_unicode=True)\n\n\nclass VoteDistribution:\n\n def __init__(self):\n self.chart = pygal.Pie(title='Vote Distribution', margin=20,\n inner_radius=0.7, style=style)\n\n def pull(self):\n data = {}\n up = votes.filter(value=1).count()\n down = votes.filter(value=-1).count()\n data['up'] = up\n data['down'] = down\n return data\n\n def generate(self):\n data = self.pull()\n for key, value in data.items():\n self.chart.add('{} ({})'.format(key, value), value)\n return self.chart.render(is_unicode=True)\n",
"step-2": "<mask token>\n\n\nclass QuotesByHour:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Hour', x_labels=list(map(\n str, range(24))), margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(24)]\n for quote in quotes:\n data[localtime(quote.timestamp).hour] += 1\n return data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByMonth:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Month', x_labels=MONTHS,\n margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(12)]\n for quote in quotes:\n data[localtime(quote.timestamp).month - 1] += 1\n return data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByRating:\n\n def __init__(self):\n self.chart = pygal.Histogram(title='Quotes by Rating', margin=20,\n show_legend=False, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes:\n data[quote.score] = data.get(quote.score, 0)\n data[quote.score] += 1\n return data\n\n def generate(self):\n data = self.pull()\n bars = []\n for key, value in data.items():\n bars.append((value, key, key + 1))\n self.chart.add('quotes', bars)\n return self.chart.render(is_unicode=True)\n\n\nclass VoteDistribution:\n\n def __init__(self):\n self.chart = pygal.Pie(title='Vote Distribution', margin=20,\n inner_radius=0.7, style=style)\n\n def pull(self):\n data = {}\n up = votes.filter(value=1).count()\n down = votes.filter(value=-1).count()\n data['up'] = up\n data['down'] = down\n return data\n\n def generate(self):\n data = self.pull()\n for key, value in data.items():\n self.chart.add('{} ({})'.format(key, value), value)\n return self.chart.render(is_unicode=True)\n",
"step-3": "<mask token>\n\n\nclass QuotesOverTime:\n\n def __init__(self):\n self.chart = pygal.DateTimeLine(title='Quotes over Time',\n x_label_rotation=90, x_value_formatter=lambda dt: dt.strftime(\n '%b %Y'), margin=20, show_legend=False, show_dots=False, fill=\n True, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes.order_by('timestamp'):\n timestamp = quote.timestamp.timestamp()\n data[timestamp] = data.get(timestamp, 0)\n data[timestamp] += 1\n return data\n\n def generate(self):\n data = self.pull()\n points = []\n total = 0\n for key, value in data.items():\n points.append((key, total))\n total += value\n self.chart.add('quotes', points)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByHour:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Hour', x_labels=list(map(\n str, range(24))), margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(24)]\n for quote in quotes:\n data[localtime(quote.timestamp).hour] += 1\n return data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByMonth:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Month', x_labels=MONTHS,\n margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(12)]\n for quote in quotes:\n data[localtime(quote.timestamp).month - 1] += 1\n return data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByRating:\n\n def __init__(self):\n self.chart = pygal.Histogram(title='Quotes by Rating', margin=20,\n show_legend=False, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes:\n data[quote.score] = data.get(quote.score, 0)\n data[quote.score] += 1\n return data\n\n def generate(self):\n data = self.pull()\n bars = []\n for key, value in data.items():\n bars.append((value, key, key + 
1))\n self.chart.add('quotes', bars)\n return self.chart.render(is_unicode=True)\n\n\nclass VoteDistribution:\n\n def __init__(self):\n self.chart = pygal.Pie(title='Vote Distribution', margin=20,\n inner_radius=0.7, style=style)\n\n def pull(self):\n data = {}\n up = votes.filter(value=1).count()\n down = votes.filter(value=-1).count()\n data['up'] = up\n data['down'] = down\n return data\n\n def generate(self):\n data = self.pull()\n for key, value in data.items():\n self.chart.add('{} ({})'.format(key, value), value)\n return self.chart.render(is_unicode=True)\n",
"step-4": "<mask token>\nstyle = Style(background='transparent', plot_background='transparent',\n foreground='#3d3d3d', foreground_strong='#303030', foreground_subtle=\n '#939393', opacity='.8', opacity_hover='.9', colors=('#fa5555', '#888'),\n label_font_size=15, major_label_font_size=15, title_font_size=20,\n legend_font_size=15)\nMONTHS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',\n 'Oct', 'Nov', 'Dec')\nquotes = Quote.objects.annotate(score=Coalesce(Sum('vote__value'), 0),\n votes=Count('vote')).filter(approved=True)\nvotes = Vote.objects\n\n\nclass QuotesOverTime:\n\n def __init__(self):\n self.chart = pygal.DateTimeLine(title='Quotes over Time',\n x_label_rotation=90, x_value_formatter=lambda dt: dt.strftime(\n '%b %Y'), margin=20, show_legend=False, show_dots=False, fill=\n True, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes.order_by('timestamp'):\n timestamp = quote.timestamp.timestamp()\n data[timestamp] = data.get(timestamp, 0)\n data[timestamp] += 1\n return data\n\n def generate(self):\n data = self.pull()\n points = []\n total = 0\n for key, value in data.items():\n points.append((key, total))\n total += value\n self.chart.add('quotes', points)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByHour:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Hour', x_labels=list(map(\n str, range(24))), margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(24)]\n for quote in quotes:\n data[localtime(quote.timestamp).hour] += 1\n return data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByMonth:\n\n def __init__(self):\n self.chart = pygal.Bar(title='Quotes by Month', x_labels=MONTHS,\n margin=20, show_legend=False, style=style)\n\n def pull(self):\n data = [(0) for _ in range(12)]\n for quote in quotes:\n data[localtime(quote.timestamp).month - 1] += 1\n return 
data\n\n def generate(self):\n data = self.pull()\n self.chart.add('quotes', data)\n return self.chart.render(is_unicode=True)\n\n\nclass QuotesByRating:\n\n def __init__(self):\n self.chart = pygal.Histogram(title='Quotes by Rating', margin=20,\n show_legend=False, style=style)\n\n def pull(self):\n data = {}\n for quote in quotes:\n data[quote.score] = data.get(quote.score, 0)\n data[quote.score] += 1\n return data\n\n def generate(self):\n data = self.pull()\n bars = []\n for key, value in data.items():\n bars.append((value, key, key + 1))\n self.chart.add('quotes', bars)\n return self.chart.render(is_unicode=True)\n\n\nclass VoteDistribution:\n\n def __init__(self):\n self.chart = pygal.Pie(title='Vote Distribution', margin=20,\n inner_radius=0.7, style=style)\n\n def pull(self):\n data = {}\n up = votes.filter(value=1).count()\n down = votes.filter(value=-1).count()\n data['up'] = up\n data['down'] = down\n return data\n\n def generate(self):\n data = self.pull()\n for key, value in data.items():\n self.chart.add('{} ({})'.format(key, value), value)\n return self.chart.render(is_unicode=True)\n",
"step-5": "from django.db.models import Sum, Count\nfrom django.db.models.functions import Coalesce\nfrom django.utils.timezone import localtime\n\nfrom .models import Quote, Vote\n\nimport pygal\n\nfrom pygal.style import Style\nstyle = Style(\n\tbackground='transparent',\n\tplot_background='transparent',\n\tforeground='#3d3d3d',\n\tforeground_strong='#303030',\n\tforeground_subtle='#939393',\n\topacity='.8',\n\topacity_hover='.9',\n\tcolors=('#fa5555', '#888'),\n\tlabel_font_size=15,\n\tmajor_label_font_size=15,\n\ttitle_font_size=20,\n\tlegend_font_size=15\n)\n\nMONTHS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')\n\nquotes = Quote.objects.annotate(score=Coalesce(Sum('vote__value'), 0), votes=Count('vote')).filter(approved=True)\nvotes = Vote.objects\n\nclass QuotesOverTime():\n\n\tdef __init__(self):\n\t\tself.chart = pygal.DateTimeLine(\n\t\t\ttitle='Quotes over Time',\n\t\t\tx_label_rotation=90,\n\t\t\tx_value_formatter=lambda dt: dt.strftime('%b %Y'),\n\t\t\tmargin=20,\n\t\t\tshow_legend=False,\n\t\t\tshow_dots=False,\n\t\t\tfill=True,\n\t\t\tstyle=style\n\t\t)\n\n\tdef pull(self):\n\t\tdata = {}\n\t\tfor quote in quotes.order_by('timestamp'):\n\t\t\ttimestamp = quote.timestamp.timestamp()\n\t\t\tdata[timestamp] = data.get(timestamp, 0)\n\t\t\tdata[timestamp] += 1\n\t\treturn data\n\n\tdef generate(self):\n\t\tdata = self.pull()\n\t\tpoints = []\n\t\ttotal = 0\n\t\tfor key, value in data.items():\n\t\t\tpoints.append((key, total))\n\t\t\ttotal += value\n\t\tself.chart.add('quotes', points)\n\t\treturn self.chart.render(is_unicode=True)\n\nclass QuotesByHour():\n\n\tdef __init__(self):\n\t\tself.chart = pygal.Bar(\n\t\t\ttitle='Quotes by Hour',\n\t\t\tx_labels = list(map(str, range(24))),\n\t\t\tmargin=20,\n\t\t\tshow_legend=False,\n\t\t\tstyle=style\n\t\t)\n\n\tdef pull(self):\n\t\tdata = [0 for _ in range(24)]\n\t\tfor quote in quotes:\n\t\t\tdata[localtime(quote.timestamp).hour] += 1\n\t\treturn data\n\n\tdef 
generate(self):\n\t\tdata = self.pull()\n\t\tself.chart.add('quotes', data)\n\t\treturn self.chart.render(is_unicode=True)\n\nclass QuotesByMonth():\n\n\tdef __init__(self):\n\t\tself.chart = pygal.Bar(\n\t\t\ttitle='Quotes by Month',\n\t\t\tx_labels = MONTHS,\n\t\t\tmargin=20,\n\t\t\tshow_legend=False,\n\t\t\tstyle=style\n\t\t)\n\n\tdef pull(self):\n\t\tdata = [0 for _ in range(12)]\n\t\tfor quote in quotes:\n\t\t\tdata[localtime(quote.timestamp).month-1] += 1\n\t\treturn data\n\n\tdef generate(self):\n\t\tdata = self.pull()\n\t\tself.chart.add('quotes', data)\n\t\treturn self.chart.render(is_unicode=True)\n\nclass QuotesByRating():\n\n\tdef __init__(self):\n\t\tself.chart = pygal.Histogram(\n\t\t\ttitle='Quotes by Rating',\n\t\t\tmargin=20,\n\t\t\tshow_legend=False,\n\t\t\tstyle=style\n\t\t)\n\n\tdef pull(self):\n\t\tdata = {}\n\t\tfor quote in quotes:\n\t\t\tdata[quote.score] = data.get(quote.score, 0)\n\t\t\tdata[quote.score] += 1\n\t\treturn data\n\n\tdef generate(self):\n\t\tdata = self.pull()\n\t\tbars = []\n\t\tfor key, value in data.items():\n\t\t\tbars.append((value, key, key+1))\n\t\tself.chart.add('quotes', bars)\n\t\treturn self.chart.render(is_unicode=True)\n\nclass VoteDistribution():\n\n\tdef __init__(self):\n\t\tself.chart = pygal.Pie(\n\t\t\ttitle='Vote Distribution',\n\t\t\tmargin=20,\n\t\t\tinner_radius=.7,\n\t\t\tstyle=style\n\t\t)\n\n\tdef pull(self):\n\t\tdata = {}\n\t\tup = votes.filter(value=1).count()\n\t\tdown = votes.filter(value=-1).count()\n\t\tdata['up'] = up\n\t\tdata['down'] = down\n\t\treturn data\n\n\tdef generate(self):\n\t\tdata = self.pull()\n\t\tfor key, value in data.items():\n\t\t\tself.chart.add('{} ({})'.format(key, value), value)\n\t\treturn self.chart.render(is_unicode=True)\n",
"step-ids": [
9,
16,
20,
21,
23
]
}
|
[
9,
16,
20,
21,
23
] |
# Generated by Django 3.1.7 on 2021-04-16 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for this app: creates the AuditLog table.
    # Code left byte-identical — Django compares migration state, so only
    # comments are added here.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='AuditLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # auto_now=True: refreshed on every save, so this records the time of the action.
                ('action_time', models.DateTimeField(auto_now=True, verbose_name='操作时间')),
                ('user', models.CharField(max_length=64, verbose_name='操作者')),
                ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')),
                ('operate_type', models.CharField(max_length=32, verbose_name='操作类型')),
                ('change_message', models.TextField(blank=True, verbose_name='操作信息')),
            ],
            options={
                'verbose_name': '操作日志',
                # Newest log entries first by default.
                'ordering': ['-id'],
            },
        ),
    ]
|
normal
|
{
"blob_id": "d65d85b4573728ed32ccf987459d5a228e2a8897",
"index": 5196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='AuditLog', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('action_time', models.DateTimeField(\n auto_now=True, verbose_name='操作时间')), ('user', models.CharField(\n max_length=64, verbose_name='操作者')), ('obj', models.TextField(blank\n =True, null=True, verbose_name='操作对象')), ('operate_type', models.\n CharField(max_length=32, verbose_name='操作类型')), ('change_message',\n models.TextField(blank=True, verbose_name='操作信息'))], options={\n 'verbose_name': '操作日志', 'ordering': ['-id']})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='AuditLog', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('action_time', models.DateTimeField(\n auto_now=True, verbose_name='操作时间')), ('user', models.CharField(\n max_length=64, verbose_name='操作者')), ('obj', models.TextField(blank\n =True, null=True, verbose_name='操作对象')), ('operate_type', models.\n CharField(max_length=32, verbose_name='操作类型')), ('change_message',\n models.TextField(blank=True, verbose_name='操作信息'))], options={\n 'verbose_name': '操作日志', 'ordering': ['-id']})]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-04-16 14:03\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AuditLog',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('action_time', models.DateTimeField(auto_now=True, verbose_name='操作时间')),\n ('user', models.CharField(max_length=64, verbose_name='操作者')),\n ('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')),\n ('operate_type', models.CharField(max_length=32, verbose_name='操作类型')),\n ('change_message', models.TextField(blank=True, verbose_name='操作信息')),\n ],\n options={\n 'verbose_name': '操作日志',\n 'ordering': ['-id'],\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def convert_mass_to_concentration(fluidStream, component):
total_mass = fluidStream.TotalMass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def convert_mass_to_concentration(fluidStream, component):
total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
component_density = CHEMICALS[component][1]
component_volume = mass * component_density
return component_volume
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CHEMICALS_KEY_GUIDE = ['MW', 'Density']
CHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],
'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],
'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [
130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],
'Water/Glucose 10%': [34.2291, 1.0375]}
SOLVE_FOR_PRODUCTION = True
PRODUCTION_TO_SOLVE = 100000000
def convert_mass_to_concentration(fluidStream, component):
total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
component_density = CHEMICALS[component][1]
component_volume = mass * component_density
return component_volume
TIME_INIT = 0
C_BACT_INIT = 0.7
C_GLUC_INIT = 100.0
C_LA_INIT = 12.57
C_TWEEN_INIT = 1.0
dBACT_dT_INIT = 0.0
FERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,
'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':
C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}
FERMENT_WATER_VOL = 750000
FERMENT_VESSEL_COUNT = 4
FERMENT_RUNTIME = 32
FERMENT_DOWNTIME = 8
FERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME
FERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':
FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':
FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}
SALTS_pKa = 3.86
SALTS_Ka = pow(10, -1 * SALTS_pKa)
MAX_pH = 3.8
pKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)
MW_SALT = CHEMICALS['Calcium Lactate'][0]
MW_LA = CHEMICALS['Lactic Acid'][0]
<|reserved_special_token_1|>
from FluidStream import *
CHEMICALS_KEY_GUIDE = ['MW', 'Density']
CHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],
'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],
'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [
130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],
'Water/Glucose 10%': [34.2291, 1.0375]}
SOLVE_FOR_PRODUCTION = True
PRODUCTION_TO_SOLVE = 100000000
def convert_mass_to_concentration(fluidStream, component):
total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
component_density = CHEMICALS[component][1]
component_volume = mass * component_density
return component_volume
TIME_INIT = 0
C_BACT_INIT = 0.7
C_GLUC_INIT = 100.0
C_LA_INIT = 12.57
C_TWEEN_INIT = 1.0
dBACT_dT_INIT = 0.0
FERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,
'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':
C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}
FERMENT_WATER_VOL = 750000
FERMENT_VESSEL_COUNT = 4
FERMENT_RUNTIME = 32
FERMENT_DOWNTIME = 8
FERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME
FERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':
FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':
FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}
SALTS_pKa = 3.86
SALTS_Ka = pow(10, -1 * SALTS_pKa)
MAX_pH = 3.8
pKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)
MW_SALT = CHEMICALS['Calcium Lactate'][0]
MW_LA = CHEMICALS['Lactic Acid'][0]
<|reserved_special_token_1|>
from FluidStream import *
# List of chemicals and their constant properties
CHEMICALS_KEY_GUIDE = ['MW' , 'Density']
CHEMICALS = {
'Bacteria' : ['NA' , 1.05 ],
'Calcium Carbonate' : [100.087 , 2.71 ],
'Calcium Lactate' : [218.22 , 1.494 ],
'Corn Steep Liquor' : ['NA' , 1.2326],
'Glucose' : [180.156 , 1.54 ],
'Lactic Acid' : [90.08 , 1.206 ],
'Octanol' : [130.231 , .824 ],
'Tween 80' : ['NA' , 1.07 ],
'Water' : [18.015 , .995 ],
'Water/Glucose 10%' : [34.2291 , 1.0375]
}
SOLVE_FOR_PRODUCTION = True
PRODUCTION_TO_SOLVE = 100000000
def convert_mass_to_concentration(fluidStream, component):
    # NOTE(review): incomplete stub — it reads the stream's total mass but
    # never computes or returns a concentration (implicitly returns None),
    # and `component` is unused. Presumably it was meant to return the
    # component's mass divided by the stream total — confirm intent with the
    # author before relying on this function.
    total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
    """Return the volume occupied by ``mass`` of ``component``.

    Density is looked up from CHEMICALS (index 1 per CHEMICALS_KEY_GUIDE).
    Bug fix: volume = mass / density. The original multiplied mass by
    density, which is dimensionally wrong (g * g/mL) and overstated the
    volume of dense components.
    """
    component_density = CHEMICALS[component][1]
    return mass / component_density
# Bacterial Growth Curve
# TIME_INIT --> hours
TIME_INIT = 0
# C_BACT_INIT --> g/L
C_BACT_INIT = .7
# C_GLUC_INIT --> g/L
C_GLUC_INIT = 100.0
# C_LA_INIT --> g/L
C_LA_INIT = 12.57
# C_TWEEN_INIT --> g/L
C_TWEEN_INIT = 1.0
# dBACT_dT -- > g/L*h
dBACT_dT_INIT = 0.0
FERMENT_IN = {
'Bacteria Concentration' : C_BACT_INIT,
'Glucose Concentration' : C_GLUC_INIT,
'Lactic Acid Concentration' : C_LA_INIT,
'Tween 80 Concentration' : C_TWEEN_INIT
}
# HOLDING TANK SPECS
# Initial Fermentation Water Charge in Liters
FERMENT_WATER_VOL = 750000
# Number of Fermentation Vessels
FERMENT_VESSEL_COUNT = 4
# Runtime of Fermentation Process
FERMENT_RUNTIME = 32
# Downtime of Fermentation Process
FERMENT_DOWNTIME = 8
# Total Runtime of Each Fermentation Batch
FERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME
FERMENT_CONST = {
'Water Volume' : FERMENT_WATER_VOL,
'Vessel Count' : FERMENT_VESSEL_COUNT,
'Runtime' : FERMENT_RUNTIME,
'Downtime' : FERMENT_DOWNTIME,
'Batch Time' : FERMENT_BATCH_TIME }
# Acid Dissociation Constant Ka
SALTS_pKa = 3.86
SALTS_Ka = pow(10, (-1*SALTS_pKa))
MAX_pH = 3.8
pKa_pH_CALC = pow(10, (SALTS_pKa - MAX_pH))
MW_SALT = CHEMICALS['Calcium Lactate'][0]
MW_LA = CHEMICALS['Lactic Acid'][0]
|
flexible
|
{
"blob_id": "3471f02f507104202c1e49440172f120ba17730f",
"index": 9263,
"step-1": "<mask token>\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\n<mask token>\n",
"step-3": "<mask token>\nCHEMICALS_KEY_GUIDE = ['MW', 'Density']\nCHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],\n 'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],\n 'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [\n 130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],\n 'Water/Glucose 10%': [34.2291, 1.0375]}\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\nTIME_INIT = 0\nC_BACT_INIT = 0.7\nC_GLUC_INIT = 100.0\nC_LA_INIT = 12.57\nC_TWEEN_INIT = 1.0\ndBACT_dT_INIT = 0.0\nFERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,\n 'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':\n C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}\nFERMENT_WATER_VOL = 750000\nFERMENT_VESSEL_COUNT = 4\nFERMENT_RUNTIME = 32\nFERMENT_DOWNTIME = 8\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\nFERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':\n FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':\n FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, -1 * SALTS_pKa)\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n",
"step-4": "from FluidStream import *\nCHEMICALS_KEY_GUIDE = ['MW', 'Density']\nCHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],\n 'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],\n 'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [\n 130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],\n 'Water/Glucose 10%': [34.2291, 1.0375]}\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\nTIME_INIT = 0\nC_BACT_INIT = 0.7\nC_GLUC_INIT = 100.0\nC_LA_INIT = 12.57\nC_TWEEN_INIT = 1.0\ndBACT_dT_INIT = 0.0\nFERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,\n 'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':\n C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}\nFERMENT_WATER_VOL = 750000\nFERMENT_VESSEL_COUNT = 4\nFERMENT_RUNTIME = 32\nFERMENT_DOWNTIME = 8\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\nFERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':\n FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':\n FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, -1 * SALTS_pKa)\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n",
"step-5": "from FluidStream import *\n# List of chemicals and their constant properties\n\nCHEMICALS_KEY_GUIDE = ['MW' , 'Density']\nCHEMICALS = {\n'Bacteria'\t\t\t: ['NA' , 1.05 ],\n'Calcium Carbonate' : [100.087 , 2.71 ],\n'Calcium Lactate' : [218.22 , 1.494 ],\n'Corn Steep Liquor' : ['NA'\t , 1.2326],\n'Glucose'\t\t\t: [180.156 , 1.54 ],\n'Lactic Acid'\t\t: [90.08 , 1.206 ],\n'Octanol' : [130.231 , .824 ],\n'Tween 80'\t\t\t: ['NA'\t , 1.07 ],\n'Water'\t\t\t\t: [18.015 , .995 ],\n'Water/Glucose 10%'\t: [34.2291 , 1.0375]\n}\n\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass*component_density\n return component_volume\n\n\n# Bacterial Growth Curve\n\n# TIME_INIT --> hours\nTIME_INIT = 0\n\n# C_BACT_INIT --> g/L\nC_BACT_INIT = .7\n\n# C_GLUC_INIT --> g/L\nC_GLUC_INIT = 100.0\n\n# C_LA_INIT --> g/L\nC_LA_INIT = 12.57\n\n# C_TWEEN_INIT --> g/L\nC_TWEEN_INIT = 1.0\n\n# dBACT_dT -- > g/L*h\ndBACT_dT_INIT = 0.0\n\nFERMENT_IN = {\n'Bacteria Concentration' : C_BACT_INIT,\n'Glucose Concentration' : C_GLUC_INIT,\n'Lactic Acid Concentration' : C_LA_INIT,\n'Tween 80 Concentration' : C_TWEEN_INIT\n}\n\n# HOLDING TANK SPECS\n# Initial Fermentation Water Charge in Liters\nFERMENT_WATER_VOL = 750000\n# Number of Fermentation Vessels\nFERMENT_VESSEL_COUNT = 4\n# Runtime of Fermentation Process\nFERMENT_RUNTIME = 32\n# Downtime of Fermentation Process\nFERMENT_DOWNTIME = 8\n# Total Runtime of Each Fermentation Batch\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\n\nFERMENT_CONST = {\n'Water Volume' : FERMENT_WATER_VOL,\n'Vessel Count' : FERMENT_VESSEL_COUNT,\n'Runtime' : FERMENT_RUNTIME,\n'Downtime' : FERMENT_DOWNTIME,\n'Batch Time' : FERMENT_BATCH_TIME }\n\n# Acid Dissociation Constant Ka\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, 
(-1*SALTS_pKa))\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, (SALTS_pKa - MAX_pH))\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class UserForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = User
fields = 'first_name', 'last_name', 'email'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
user.username = self.cleaned_data['email']
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
return user
class ProfileForm(forms.ModelForm):
    """Model form for editing a Profile; only the primary key is excluded."""

    class Meta:
        model = Profile
        exclude = ('id',)

    def __init__(self, *args, **kwargs):
        """Apply Bootstrap styling and a placeholder to every widget."""
        super(ProfileForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            if not field:
                continue
            field.widget.attrs.update({
                'class': 'form-control input-lg',
                'placeholder': field.label,
                'autocomplete': 'off',
            })
<|reserved_special_token_0|>
class AuctionForm(forms.ModelForm):
    """Form for creating/editing an Auction.

    Server-managed fields (account, slug, status, winner, is_active) are
    excluded. All widgets get Bootstrap classes; the expire field also gets
    the datepicker hook.
    """

    class Meta:
        model = Auction
        exclude = 'account', 'slug', 'status', 'winner', 'is_active'

    def __init__(self, *args, **kwargs):
        super(AuctionForm, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label, 'autocomplete': 'off'})
            if field and field_name == 'expire':
                field.widget.attrs.update({'class':
                                           'form-control input-lg datepicker'})

    def clean_expire(self):
        """Require the expiry to be at least 3 days (72h) from today.

        Bug fixes vs. the original:
        * guard against a missing value (the field's own validation already
          failed) instead of raising AttributeError on ``None``;
        * return the cleaned value unchanged rather than its ``.date()``,
          which silently discarded the time component before saving.
        """
        expire = self.cleaned_data.get('expire')
        if expire is None:
            return expire
        if expire.date() < date.today() + timedelta(days=3):
            raise forms.ValidationError(_(
                'Expire should be 72 hour from now on.'))
        return expire
class BidAuction(forms.ModelForm):
    """Form for placing a bid on an auction.

    The target auction is supplied via the ``auction`` kwarg and used to
    validate that the bid beats the current highest bid (or the starting
    price when no bids exist yet) by at least 0.05.
    """

    class Meta:
        model = Bid
        exclude = 'id', 'auction', 'bidder'

    def __init__(self, *args, **kwargs):
        self.auction = kwargs.pop('auction', None)
        super(BidAuction, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label, 'autocomplete': 'off'})

    def clean_bid_price(self):
        """Validate that the bid exceeds the current high bid by >= 0.05.

        Bug fixes vs. the original:
        * ``Decimal('0.05')`` instead of ``Decimal(0.05)`` — constructing
          from a float yields 0.05000000000000000277..., making both the
          comparison and the displayed minimum subtly wrong;
        * bail out early when the field itself failed validation, instead of
          comparing ``None`` against a Decimal (TypeError).
        """
        price = self.cleaned_data.get('bid_price')
        if price is None:
            return price
        highest = Bid.objects.filter(auction=self.auction).aggregate(Max(
            'bid_price'))['bid_price__max']
        if highest is None:
            highest = self.auction.price.amount
        min_price = highest + Decimal('0.05')
        if price < min_price:
            raise forms.ValidationError(_('Price should be more than %s.' %
                                          '{0:.2f}'.format(min_price)))
        return price
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = User
fields = 'first_name', 'last_name', 'email'
<|reserved_special_token_0|>
def clean_email(self):
email = self.cleaned_data.get('email')
check = User.objects.filter(email=email)
if self.instance.email == email:
return email
else:
if len(check) > 0:
raise forms.ValidationError(_(
'This email address is already in use. Please supply a different email address.'
))
return email
<|reserved_special_token_0|>
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
user.username = self.cleaned_data['email']
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
return user
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = 'id',
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
<|reserved_special_token_0|>
class AuctionForm(forms.ModelForm):
class Meta:
model = Auction
exclude = 'account', 'slug', 'status', 'winner', 'is_active'
def __init__(self, *args, **kwargs):
super(AuctionForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
if field and field_name == 'expire':
field.widget.attrs.update({'class':
'form-control input-lg datepicker'})
def clean_expire(self):
expire = self.cleaned_data.get('expire').date()
if expire < date.today() + timedelta(days=3):
raise forms.ValidationError(_(
'Expire should be 72 hour from now on.'))
return expire
class BidAuction(forms.ModelForm):
class Meta:
model = Bid
exclude = 'id', 'auction', 'bidder'
def __init__(self, *args, **kwargs):
self.auction = kwargs.pop('auction', None)
super(BidAuction, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
def clean_bid_price(self):
qs = Bid.objects.filter(auction=self.auction).aggregate(Max(
'bid_price'))['bid_price__max']
if qs is None:
qs = self.auction.price.amount
price = self.cleaned_data.get('bid_price')
min_price = qs + Decimal(0.05)
if price < min_price:
raise forms.ValidationError(_('Price should be more than %s.' %
'{0:.2f}'.format(min_price)))
return price
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserForm(forms.ModelForm):
error_email = {'email_exist': _('Email allready exist.')}
error_password = {'password_less': _(
'Password should be more than 6 characters.')}
password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)
class Meta:
model = User
fields = 'first_name', 'last_name', 'email'
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['email'].required = True
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
def clean_email(self):
email = self.cleaned_data.get('email')
check = User.objects.filter(email=email)
if self.instance.email == email:
return email
else:
if len(check) > 0:
raise forms.ValidationError(_(
'This email address is already in use. Please supply a different email address.'
))
return email
def clean_password(self):
password = self.cleaned_data.get('password')
if len(password) < 6:
raise forms.ValidationError(_(
'Password should be more than 6 characters.'))
return password
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
user.username = self.cleaned_data['email']
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
return user
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = 'id',
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
UserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,
extra=1, can_delete=False)
class AuctionForm(forms.ModelForm):
class Meta:
model = Auction
exclude = 'account', 'slug', 'status', 'winner', 'is_active'
def __init__(self, *args, **kwargs):
super(AuctionForm, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
if field and field_name == 'expire':
field.widget.attrs.update({'class':
'form-control input-lg datepicker'})
def clean_expire(self):
expire = self.cleaned_data.get('expire').date()
if expire < date.today() + timedelta(days=3):
raise forms.ValidationError(_(
'Expire should be 72 hour from now on.'))
return expire
class BidAuction(forms.ModelForm):
class Meta:
model = Bid
exclude = 'id', 'auction', 'bidder'
def __init__(self, *args, **kwargs):
self.auction = kwargs.pop('auction', None)
super(BidAuction, self).__init__(*args, **kwargs)
for field_name in self.fields:
field = self.fields.get(field_name)
if field:
field.widget.attrs.update({'class': 'form-control input-lg',
'placeholder': field.label, 'autocomplete': 'off'})
def clean_bid_price(self):
qs = Bid.objects.filter(auction=self.auction).aggregate(Max(
'bid_price'))['bid_price__max']
if qs is None:
qs = self.auction.price.amount
price = self.cleaned_data.get('bid_price')
min_price = qs + Decimal(0.05)
if price < min_price:
raise forms.ValidationError(_('Price should be more than %s.' %
'{0:.2f}'.format(min_price)))
return price
<|reserved_special_token_1|>
from django import forms
from django.forms import inlineformset_factory
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Max
from auction.models import *
from datetime import *
from decimal import *
import re
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in ``User`` model.

    Adds a password field, validates email uniqueness and minimum
    password length, and on ``save()`` mirrors the email into
    ``username`` and stores a hashed password.
    """

    # NOTE(review): these message dicts are never referenced in this
    # module; kept because they are public class attributes.
    error_email = {'email_exist': _('Email allready exist.')}
    error_password = {'password_less': _(
        'Password should be more than 6 characters.')}
    password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)


    class Meta:
        model = User
        fields = 'first_name', 'last_name', 'email'

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True
        # Apply Bootstrap styling to every rendered widget.
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                    'placeholder': field.label, 'autocomplete': 'off'})

    def clean_email(self):
        """Reject an email already registered to a different account."""
        email = self.cleaned_data.get('email')
        if self.instance.email == email:
            return email
        # .exists() issues a cheap EXISTS query instead of fetching rows
        # just to count them.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(_(
                'This email address is already in use. Please supply a different email address.'
                ))
        return email

    def clean_password(self):
        """Enforce a minimum password length of 6 characters."""
        password = self.cleaned_data.get('password')
        if len(password) < 6:
            raise forms.ValidationError(_(
                'Password should be more than 6 characters.'))
        return password

    def save(self, commit=True):
        """Save the user with email-as-username and a hashed password."""
        user = super(UserForm, self).save(commit=False)
        user.username = self.cleaned_data['email']
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
        return user
class ProfileForm(forms.ModelForm):
    """Model form for ``Profile`` with Bootstrap-styled widgets."""


    class Meta:
        model = Profile
        exclude = 'id',

    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        # Style every widget the same way the other forms in this module do.
        for field in self.fields.values():
            field.widget.attrs.update({
                'class': 'form-control input-lg',
                'placeholder': field.label,
                'autocomplete': 'off',
            })
# Inline formset binding exactly one extra Profile to a User; rows cannot
# be deleted through the formset.
UserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,
    extra=1, can_delete=False)
class AuctionForm(forms.ModelForm):
    """Form used to create an auction; server-managed fields are hidden."""


    class Meta:
        model = Auction
        exclude = 'account', 'slug', 'status', 'winner', 'is_active'

    def __init__(self, *args, **kwargs):
        super(AuctionForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            field.widget.attrs.update({'class': 'form-control input-lg',
                                       'placeholder': field.label,
                                       'autocomplete': 'off'})
            if name == 'expire':
                # The expiry input additionally gets the datepicker hook.
                field.widget.attrs.update(
                    {'class': 'form-control input-lg datepicker'})

    def clean_expire(self):
        """Require the expiry date to be at least 72 hours (3 days) away."""
        expire = self.cleaned_data.get('expire').date()
        earliest = date.today() + timedelta(days=3)
        if expire < earliest:
            raise forms.ValidationError(_(
                'Expire should be 72 hour from now on.'))
        return expire
class BidAuction(forms.ModelForm):
    """Form for placing a bid on one auction.

    The target auction is passed via the ``auction`` keyword argument and
    is used to validate that the bid beats the current highest price.
    """


    class Meta:
        model = Bid
        exclude = 'id', 'auction', 'bidder'

    def __init__(self, *args, **kwargs):
        self.auction = kwargs.pop('auction', None)
        super(BidAuction, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                    'placeholder': field.label, 'autocomplete': 'off'})

    def clean_bid_price(self):
        """Require the bid to top the current maximum by at least 0.05."""
        highest = Bid.objects.filter(auction=self.auction).aggregate(Max(
            'bid_price'))['bid_price__max']
        if highest is None:
            # No bids yet: start from the auction's asking price.
            highest = self.auction.price.amount
        price = self.cleaned_data.get('bid_price')
        # Decimal('0.05') keeps the increment exact; Decimal(0.05) would
        # carry binary floating-point error (0.05000000000000000277...)
        # into the minimum-price comparison.
        min_price = highest + Decimal('0.05')
        if price < min_price:
            raise forms.ValidationError(_('Price should be more than %s.' %
                '{0:.2f}'.format(min_price)))
        return price
<|reserved_special_token_1|>
from django import forms
from django.forms import inlineformset_factory
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models import Max
from auction.models import *
from datetime import *
from decimal import *
import re
class UserForm(forms.ModelForm):
    """Sign-up form for Django's ``User`` with password handling.

    Validates email uniqueness and minimum password length; ``save()``
    uses the email as the username and stores a hashed password.
    """

    # NOTE(review): these message dicts are not read anywhere in this
    # module; kept for interface compatibility.
    error_email = {
        'email_exist': _("Email allready exist."),
    }

    error_password = {
        'password_less': _("Password should be more than 6 characters."),
    }

    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email')

    def __init__(self, *args, **kwargs):
        super(UserForm, self).__init__(*args, **kwargs)
        self.fields['email'].required = True
        # Bootstrap styling for every widget.
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label,
                                           'autocomplete': 'off'})

    def clean_email(self):
        """Reject an email already registered to a different account."""
        email = self.cleaned_data.get("email")
        if self.instance.email == email:
            return email
        # exists() issues a cheap EXISTS query instead of fetching rows.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(
                _("This email address is already in use. Please supply a different email address."))
        return email

    def clean_password(self):
        """Enforce a minimum password length of 6 characters."""
        password = self.cleaned_data.get("password")
        if len(password) < 6:
            raise forms.ValidationError(
                _("Password should be more than 6 characters."))
        return password

    def save(self, commit=True):
        """Save the user with email-as-username and a hashed password."""
        user = super(UserForm, self).save(commit=False)
        user.username = self.cleaned_data["email"]
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user
class ProfileForm(forms.ModelForm):
    """Edit form for ``Profile``; all widgets share Bootstrap styling."""

    class Meta:
        model = Profile
        exclude = ('id',)

    def __init__(self, *args, **kwargs):
        super(ProfileForm, self).__init__(*args, **kwargs)
        common_attrs = {'class': 'form-control input-lg',
                        'autocomplete': 'off'}
        for field in self.fields.values():
            field.widget.attrs.update(dict(common_attrs,
                                           placeholder=field.label))
# Inline formset binding exactly one extra Profile to a User (no deletion).
UserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm, extra=1, can_delete=False)
class AuctionForm(forms.ModelForm):
    """Auction creation form; server-managed columns are excluded."""

    class Meta:
        model = Auction
        exclude = ('account', 'slug', 'status', 'winner', 'is_active',)

    def __init__(self, *args, **kwargs):
        super(AuctionForm, self).__init__(*args, **kwargs)
        for name, field in self.fields.items():
            field.widget.attrs.update({'class': 'form-control input-lg',
                                       'placeholder': field.label,
                                       'autocomplete': 'off'})
            if name == 'expire':
                # Hook the datepicker widget onto the expiry input.
                field.widget.attrs.update(
                    {'class': 'form-control input-lg datepicker'})

    def clean_expire(self):
        """The auction must run for at least 72 hours (3 days)."""
        expire = self.cleaned_data.get("expire").date()
        minimum = date.today() + timedelta(days=3)
        if expire < minimum:
            raise forms.ValidationError(_("Expire should be 72 hour from now on."))
        return expire
class BidAuction(forms.ModelForm):
    """Bid form bound to one auction (passed as the ``auction`` kwarg)."""

    class Meta:
        model = Bid
        exclude = ('id', 'auction', 'bidder',)

    def __init__(self, *args, **kwargs):
        self.auction = kwargs.pop('auction', None)
        super(BidAuction, self).__init__(*args, **kwargs)
        for field_name in self.fields:
            field = self.fields.get(field_name)
            if field:
                field.widget.attrs.update({'class': 'form-control input-lg',
                                           'placeholder': field.label,
                                           'autocomplete': 'off'})

    def clean_bid_price(self):
        """Require the bid to top the current maximum by at least 0.05."""
        highest = Bid.objects.filter(auction=self.auction).aggregate(
            Max('bid_price'))['bid_price__max']
        if highest is None:
            # First bid: start from the auction's asking price.
            highest = self.auction.price.amount
        price = self.cleaned_data.get("bid_price")
        # Decimal('0.05') is exact; Decimal(0.05) would inject binary
        # floating-point error into the minimum-price comparison.
        min_price = highest + Decimal('0.05')
        if price < min_price:
            raise forms.ValidationError(_("Price should be more than %s." % "{0:.2f}".format(min_price)))
        return price
|
flexible
|
{
"blob_id": "5215b5e4efe2e126f18b3c4457dc3e3902923d49",
"index": 6360,
"step-1": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\n<mask token>\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 
'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-2": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n <mask token>\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. Please supply a different email address.'\n ))\n return email\n <mask token>\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\n<mask token>\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n 
model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-3": "<mask token>\n\n\nclass UserForm(forms.ModelForm):\n error_email = {'email_exist': _('Email allready exist.')}\n error_password = {'password_less': _(\n 'Password should be more than 6 characters.')}\n password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. Please supply a different email address.'\n ))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get('password')\n if len(password) < 6:\n raise forms.ValidationError(_(\n 'Password should be more than 6 characters.'))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,\n extra=1, can_delete=False)\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class 
Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-4": "from django import forms\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.db.models import Max\nfrom auction.models import *\nfrom datetime import *\nfrom decimal import *\nimport re\n\n\nclass UserForm(forms.ModelForm):\n error_email = {'email_exist': _('Email allready exist.')}\n error_password = {'password_less': _(\n 'Password should be more than 6 characters.')}\n password = forms.CharField(label=_('Password'), widget=forms.PasswordInput)\n\n\n class Meta:\n model = User\n fields = 'first_name', 'last_name', 'email'\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n check = User.objects.filter(email=email)\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(_(\n 'This email address is already in use. 
Please supply a different email address.'\n ))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get('password')\n if len(password) < 6:\n raise forms.ValidationError(_(\n 'Password should be more than 6 characters.'))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data['email']\n user.set_password(self.cleaned_data['password'])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = Profile\n exclude = 'id',\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm,\n extra=1, can_delete=False)\n\n\nclass AuctionForm(forms.ModelForm):\n\n\n class Meta:\n model = Auction\n exclude = 'account', 'slug', 'status', 'winner', 'is_active'\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class':\n 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get('expire').date()\n if expire < date.today() + timedelta(days=3):\n raise forms.ValidationError(_(\n 'Expire should be 72 hour from now on.'))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n\n class Meta:\n model = Bid\n exclude = 'id', 'auction', 'bidder'\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, **kwargs)\n 
for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg',\n 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction=self.auction).aggregate(Max(\n 'bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get('bid_price')\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_('Price should be more than %s.' %\n '{0:.2f}'.format(min_price)))\n return price\n",
"step-5": "from django import forms\nfrom django.forms import inlineformset_factory\nfrom django.utils.translation import ugettext, ugettext_lazy as _\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.db.models import Max\nfrom auction.models import *\nfrom datetime import *\nfrom decimal import *\nimport re\n\n\nclass UserForm(forms.ModelForm):\n\n error_email = {\n 'email_exist': _(\"Email allready exist.\"),\n }\n\n error_password = {\n 'password_less': _(\"Password should be more than 6 characters.\"),\n }\n\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ('first_name', 'last_name', 'email')\n\n def __init__(self, *args, **kwargs):\n super(UserForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,\n 'autocomplete': 'off'})\n\n def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n check = User.objects.filter(email=email)\n\n if self.instance.email == email:\n return email\n else:\n if len(check) > 0:\n raise forms.ValidationError(\n _(\"This email address is already in use. 
Please supply a different email address.\"))\n return email\n\n def clean_password(self):\n password = self.cleaned_data.get(\"password\")\n if len(password) < 6:\n raise forms.ValidationError(\n _(\"Password should be more than 6 characters.\"))\n return password\n\n def save(self, commit=True):\n user = super(UserForm, self).save(commit=False)\n user.username = self.cleaned_data[\"email\"]\n user.set_password(self.cleaned_data[\"password\"])\n if commit:\n user.save()\n return user\n\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = Profile\n exclude = ('id',)\n\n def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label,\n 'autocomplete': 'off'})\n\n\nUserProfileForm = inlineformset_factory(User, Profile, form=ProfileForm, extra=1, can_delete=False)\n\n\n\nclass AuctionForm(forms.ModelForm):\n\n class Meta:\n model = Auction\n exclude = ('account', 'slug', 'status', 'winner', 'is_active',)\n\n def __init__(self, *args, **kwargs):\n super(AuctionForm, self).__init__(*args, **kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})\n if field and field_name == 'expire':\n field.widget.attrs.update({'class': 'form-control input-lg datepicker'})\n\n def clean_expire(self):\n expire = self.cleaned_data.get(\"expire\").date()\n if expire < (date.today() + timedelta(days=3)):\n raise forms.ValidationError(_(\"Expire should be 72 hour from now on.\"))\n return expire\n\n\nclass BidAuction(forms.ModelForm):\n\n class Meta:\n model = Bid\n exclude = ('id', 'auction', 'bidder',)\n\n def __init__(self, *args, **kwargs):\n self.auction = kwargs.pop('auction', None)\n super(BidAuction, self).__init__(*args, 
**kwargs)\n for field_name in self.fields:\n field = self.fields.get(field_name)\n if field:\n field.widget.attrs.update({'class': 'form-control input-lg', 'placeholder': field.label, 'autocomplete': 'off'})\n\n def clean_bid_price(self):\n qs = Bid.objects.filter(auction = self.auction).aggregate(Max('bid_price'))['bid_price__max']\n if qs is None:\n qs = self.auction.price.amount\n price = self.cleaned_data.get(\"bid_price\")\n # min_price = qs + (self.auction.price.amount * 5) / 100\n min_price = qs + Decimal(0.05)\n if price < min_price:\n raise forms.ValidationError(_(\"Price should be more than %s.\" % \"{0:.2f}\".format(min_price)))\n return price\n",
"step-ids": [
10,
11,
15,
16,
17
]
}
|
[
10,
11,
15,
16,
17
] |
import pymysql
class DB:
    """Thin context-manager wrapper around a pymysql connection.

    Commits and closes the cursor/connection when the ``with`` block exits.
    """

    def __init__(self, host='localhost', port=3306, db_='test', user='wj',
                 passwd='', charset='utf8'):
        self.db = db_
        self.conn = pymysql.connect(host=host, port=port, db=db_, user=user,
                                    passwd=passwd, charset=charset)
        # DictCursor returns each row as a {column: value} dict.
        self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.conn.commit()
        self.cur.close()
        self.conn.close()

    def write(self, data):
        """Insert *data* into column ``a`` of table ``data``.

        Uses a parameterized query instead of string interpolation so
        quotes inside *data* cannot break the statement (SQL injection).
        """
        self.cur.execute('INSERT INTO data(a) VALUES (%s)', (data,))
        self.conn.commit()

    def read(self):
        """Return column ``a`` of the first row of table ``data``.

        Raises IndexError when the table is empty (unchanged behavior).
        """
        self.cur.execute('SELECT * FROM data')
        results = self.cur.fetchall()
        return results[0]['a']
if __name__ == '__main__':
    # Round-trip a list through the database as its str() representation.
    import ast

    test = [1, 2, 3, 4, 5, 6, 7]
    with DB() as db:
        db.write(str(test))
        # literal_eval only parses Python literals; eval() would execute
        # arbitrary code read back from the database.
        a = ast.literal_eval(db.read())
        print(a[2:])
|
normal
|
{
"blob_id": "80ad4459436e2e1cc44509e7dae18d1539bf2bc0",
"index": 8139,
"step-1": "<mask token>\n\n\nclass DB:\n <mask token>\n\n def __enter__(self):\n return self\n <mask token>\n\n def write(self, data):\n sql = \"INSERT INTO {}({}) VALUES ('%s')\".format('data', 'a') % data\n self.cur.execute(sql)\n self.conn.commit()\n\n def read(self):\n sql = 'SELECT * FROM {}'.format('data')\n self.cur.execute(sql)\n results = self.cur.fetchall()\n return results[0]['a']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DB:\n\n def __init__(self, host='localhost', port=3306, db_='test', user='wj',\n passwd='', charset='utf8'):\n self.db = db_\n self.conn = pymysql.connect(host=host, port=port, db=db_, user=user,\n passwd=passwd, charset=charset)\n self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.commit()\n self.cur.close()\n self.conn.close()\n\n def write(self, data):\n sql = \"INSERT INTO {}({}) VALUES ('%s')\".format('data', 'a') % data\n self.cur.execute(sql)\n self.conn.commit()\n\n def read(self):\n sql = 'SELECT * FROM {}'.format('data')\n self.cur.execute(sql)\n results = self.cur.fetchall()\n return results[0]['a']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DB:\n\n def __init__(self, host='localhost', port=3306, db_='test', user='wj',\n passwd='', charset='utf8'):\n self.db = db_\n self.conn = pymysql.connect(host=host, port=port, db=db_, user=user,\n passwd=passwd, charset=charset)\n self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.commit()\n self.cur.close()\n self.conn.close()\n\n def write(self, data):\n sql = \"INSERT INTO {}({}) VALUES ('%s')\".format('data', 'a') % data\n self.cur.execute(sql)\n self.conn.commit()\n\n def read(self):\n sql = 'SELECT * FROM {}'.format('data')\n self.cur.execute(sql)\n results = self.cur.fetchall()\n return results[0]['a']\n\n\nif __name__ == '__main__':\n test = [1, 2, 3, 4, 5, 6, 7]\n with DB() as db:\n db.write(str(test))\n a = eval(db.read())\n print(a[2:])\n",
"step-4": "import pymysql\n\n\nclass DB:\n\n def __init__(self, host='localhost', port=3306, db_='test', user='wj',\n passwd='', charset='utf8'):\n self.db = db_\n self.conn = pymysql.connect(host=host, port=port, db=db_, user=user,\n passwd=passwd, charset=charset)\n self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.commit()\n self.cur.close()\n self.conn.close()\n\n def write(self, data):\n sql = \"INSERT INTO {}({}) VALUES ('%s')\".format('data', 'a') % data\n self.cur.execute(sql)\n self.conn.commit()\n\n def read(self):\n sql = 'SELECT * FROM {}'.format('data')\n self.cur.execute(sql)\n results = self.cur.fetchall()\n return results[0]['a']\n\n\nif __name__ == '__main__':\n test = [1, 2, 3, 4, 5, 6, 7]\n with DB() as db:\n db.write(str(test))\n a = eval(db.read())\n print(a[2:])\n",
"step-5": "import pymysql\r\n\r\n\r\nclass DB:\r\n def __init__(self, host='localhost', port=3306, db_='test', user='wj',\r\n passwd='', charset='utf8'):\r\n self.db = db_\r\n self.conn = pymysql.connect(host=host, port=port, db=db_, user=user, passwd=passwd, charset=charset)\r\n self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)\r\n\r\n def __enter__(self):\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.conn.commit()\r\n self.cur.close()\r\n self.conn.close()\r\n\r\n def write(self, data):\r\n sql = \"INSERT INTO {}({}) VALUES ('%s')\".format('data', 'a') % data\r\n self.cur.execute(sql)\r\n self.conn.commit()\r\n\r\n def read(self):\r\n sql = \"SELECT * FROM {}\".format('data')\r\n self.cur.execute(sql)\r\n results = self.cur.fetchall()\r\n return results[0]['a']\r\n\r\n\r\nif __name__ == '__main__':\r\n test = [1, 2, 3, 4, 5, 6, 7]\r\n with DB() as db:\r\n db.write(str(test))\r\n a = eval(db.read())\r\n print(a[2:])\r\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
# Generated by Django 2.2.15 on 2020-09-16 03:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.15, 2020-09-16). Rewires related_name /
    # related_query_name on a number of existing ForeignKeys and creates the
    # new Order and OperatorShift tables.
    #
    # NOTE(review): three fields below pass
    # on_delete=django.db.models.fields.Empty, which is not one of Django's
    # documented on_delete handlers (CASCADE, PROTECT, SET_NULL, ...) —
    # confirm this resolves to a usable callable when the migration runs.
    dependencies = [('api',
        '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'
        )]
    operations = [
        # --- Address: rename reverse accessors on the city/district/street FKs ---
        migrations.AlterField(
            model_name='address',
            name='city',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_addresses', related_query_name='city_address', to='api.City'),
        ),
        migrations.AlterField(
            model_name='address',
            name='district',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='district_addresses', related_query_name='district_address', to='api.District'),
        ),
        # NOTE(review): on_delete=...fields.Empty looks wrong — see class note.
        migrations.AlterField(
            model_name='address',
            name='street',
            field=models.ForeignKey(max_length=255, on_delete=django.db.models.fields.Empty, related_name='street_addresses', related_query_name='street_address', to='api.Street', verbose_name='Улица'),
        ),
        # --- CourierShift: courier and vehicle-handover FKs ---
        migrations.AlterField(
            model_name='couriershift',
            name='courier',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='couriers', related_query_name='courier', to=settings.AUTH_USER_MODEL, verbose_name='Курьер'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shift_vehicles', related_query_name='shift_vehicle', to='api.Vehicle', verbose_name='Транспортное средство'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle_accepted_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_accepted_bys', related_query_name='vehicle_accepted_by', to=settings.AUTH_USER_MODEL, verbose_name='Принял'),
        ),
        migrations.AlterField(
            model_name='couriershift',
            name='vehicle_given_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_given_bys', related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL, verbose_name='Выдал'),
        ),
        # --- Misc FK related_name adjustments ---
        migrations.AlterField(
            model_name='district',
            name='city',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='city_districts', related_query_name='city_district', to='api.City'),
        ),
        migrations.AlterField(
            model_name='technicalservice',
            name='address',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_services', related_query_name='address_service', to='api.Address', verbose_name='Адрес СТО'),
        ),
        # NOTE(review): on_delete=...fields.Empty again on the next two fields.
        migrations.AlterField(
            model_name='vehicleservice',
            name='service',
            field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='service_vehicles', related_query_name='service_vehicle', to='api.TechnicalService'),
        ),
        migrations.AlterField(
            model_name='vehicleservice',
            name='vehicle',
            field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='vehicles', related_query_name='vehicle', to='api.Vehicle'),
        ),
        # --- New table: Order (delivery-order lifecycle, money and FK fields) ---
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
                ('status', models.CharField(choices=[('new', 'Новый'), ('accepted', 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), ('in_progress', 'Выполняется')], default='new', max_length=100, verbose_name='Статус заказа')),
                ('accepted_time', models.DateTimeField(blank=True, null=True, verbose_name='Время подтверждения заказа')),
                ('start_time', models.DateTimeField(blank=True, null=True, verbose_name='Время начала выполнения заказа')),
                ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='Время завершения заказа')),
                ('reciever_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Имя получателя')),
                ('info', models.TextField(blank=True, null=True, verbose_name='Дополнительные сведения')),
                ('ransom_sum', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Сумма выкупа')),
                ('wait_time', models.TimeField(blank=True, null=True, verbose_name='Время ожидания')),
                ('delivery_cost', models.IntegerField(blank=True, null=True, verbose_name='Стоимость даставки')),
                ('delivery_time', models.TimeField(blank=True, null=True, verbose_name='Время выполнения заказа')),
                ('courier_shift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courier_orders', related_query_name='courier_order', to='api.CourierShift', verbose_name='Смена курьера')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders_created_by', related_query_name='order_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Кем создан')),
                ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers_orders', related_query_name='customer_order', to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),
                ('delivery_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_from', related_query_name='address_from', to='api.Address', verbose_name='Забрать от')),
                ('delivery_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_to', related_query_name='address_to', to='api.Address', verbose_name='Куда доставить')),
            ],
            options={
                'get_latest_by': '-created_at',
                'abstract': False,
            },
        ),
        # --- New table: OperatorShift (operator work-shift bookkeeping) ---
        migrations.CreateModel(
            name='OperatorShift',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),
                ('start_time', models.DateField(auto_now_add=True, verbose_name='Начало смены')),
                ('end_time', models.DateField(blank=True, null=True, verbose_name='Конец смены')),
                ('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operator_shifts', related_query_name='operator_shift', to=settings.AUTH_USER_MODEL, verbose_name='Оператор')),
            ],
            options={
                'get_latest_by': '-created_at',
                'abstract': False,
            },
        ),
    ]
|
normal
|
{
"blob_id": "1c979d505b58025aae74865d6556c726ed3f0769",
"index": 2651,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api',\n '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'\n )]\n operations = [migrations.AlterField(model_name='address', name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='city_addresses', related_query_name='city_address',\n to='api.City')), migrations.AlterField(model_name='address', name=\n 'district', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='district_addresses',\n related_query_name='district_address', to='api.District')),\n migrations.AlterField(model_name='address', name='street', field=\n models.ForeignKey(max_length=255, on_delete=django.db.models.fields\n .Empty, related_name='street_addresses', related_query_name=\n 'street_address', to='api.Street', verbose_name='Улица')),\n migrations.AlterField(model_name='couriershift', name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='couriers', related_query_name='courier', to=settings.\n AUTH_USER_MODEL, verbose_name='Курьер')), migrations.AlterField(\n model_name='couriershift', name='vehicle', field=models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='shift_vehicles', related_query_name='shift_vehicle',\n to='api.Vehicle', verbose_name='Транспортное средство')),\n migrations.AlterField(model_name='couriershift', name=\n 'vehicle_accepted_by', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'vehicle_accepted_bys', related_query_name='vehicle_accepted_by',\n to=settings.AUTH_USER_MODEL, verbose_name='Принял')), migrations.\n AlterField(model_name='couriershift', name='vehicle_given_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='vehicle_given_bys',\n 
related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Выдал')), migrations.AlterField(model_name='district',\n name='city', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'city_districts', related_query_name='city_district', to='api.City'\n )), migrations.AlterField(model_name='technicalservice', name=\n 'address', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='address_services',\n related_query_name='address_service', to='api.Address',\n verbose_name='Адрес СТО')), migrations.AlterField(model_name=\n 'vehicleservice', name='service', field=models.ForeignKey(on_delete\n =django.db.models.fields.Empty, related_name='service_vehicles',\n related_query_name='service_vehicle', to='api.TechnicalService')),\n migrations.AlterField(model_name='vehicleservice', name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty,\n related_name='vehicles', related_query_name='vehicle', to=\n 'api.Vehicle')), migrations.CreateModel(name='Order', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('deleted', models.DateTimeField(\n editable=False, null=True)), ('created_at', models.DateTimeField(\n auto_now_add=True, verbose_name='Created at')), ('updated_at',\n models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted',\n 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), (\n 'in_progress', 'Выполняется')], default='new', max_length=100,\n verbose_name='Статус заказа')), ('accepted_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время подтверждения заказа')), ('start_time', models.DateTimeField\n (blank=True, null=True, verbose_name=\n 'Время начала выполнения заказа')), ('end_time', models.\n DateTimeField(blank=True, null=True, 
verbose_name=\n 'Время завершения заказа')), ('reciever_name', models.CharField(\n blank=True, max_length=255, null=True, verbose_name=\n 'Имя получателя')), ('info', models.TextField(blank=True, null=True,\n verbose_name='Дополнительные сведения')), ('ransom_sum', models.\n DecimalField(decimal_places=2, max_digits=6, verbose_name=\n 'Сумма выкупа')), ('wait_time', models.TimeField(blank=True, null=\n True, verbose_name='Время ожидания')), ('delivery_cost', models.\n IntegerField(blank=True, null=True, verbose_name=\n 'Стоимость даставки')), ('delivery_time', models.TimeField(blank=\n True, null=True, verbose_name='Время выполнения заказа')), (\n 'courier_shift', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='courier_orders', related_query_name\n ='courier_order', to='api.CourierShift', verbose_name=\n 'Смена курьера')), ('created_by', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='orders_created_by',\n related_query_name='order_created_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Кем создан')), ('customer', models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='customers_orders', related_query_name=\n 'customer_order', to=settings.AUTH_USER_MODEL, verbose_name=\n 'Клиент')), ('delivery_from', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'address_delivery_from', related_query_name='address_from', to=\n 'api.Address', verbose_name='Забрать от')), ('delivery_to', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='address_delivery_to',\n related_query_name='address_to', to='api.Address', verbose_name=\n 'Куда доставить'))], options={'get_latest_by': '-created_at',\n 'abstract': False}), migrations.CreateModel(name='OperatorShift',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, 
verbose_name='ID')), ('deleted', models.\n DateTimeField(editable=False, null=True)), ('created_at', models.\n DateTimeField(auto_now_add=True, verbose_name='Created at')), (\n 'updated_at', models.DateTimeField(auto_now_add=True, verbose_name=\n 'Updated at')), ('start_time', models.DateField(auto_now_add=True,\n verbose_name='Начало смены')), ('end_time', models.DateField(blank=\n True, null=True, verbose_name='Конец смены')), ('operator', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='operator_shifts', related_query_name='operator_shift',\n to=settings.AUTH_USER_MODEL, verbose_name='Оператор'))], options={\n 'get_latest_by': '-created_at', 'abstract': False})]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.db.models.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api',\n '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'\n )]\n operations = [migrations.AlterField(model_name='address', name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='city_addresses', related_query_name='city_address',\n to='api.City')), migrations.AlterField(model_name='address', name=\n 'district', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='district_addresses',\n related_query_name='district_address', to='api.District')),\n migrations.AlterField(model_name='address', name='street', field=\n models.ForeignKey(max_length=255, on_delete=django.db.models.fields\n .Empty, related_name='street_addresses', related_query_name=\n 'street_address', to='api.Street', verbose_name='Улица')),\n migrations.AlterField(model_name='couriershift', name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='couriers', related_query_name='courier', to=settings.\n AUTH_USER_MODEL, verbose_name='Курьер')), migrations.AlterField(\n model_name='couriershift', name='vehicle', field=models.ForeignKey(\n blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='shift_vehicles', related_query_name='shift_vehicle',\n to='api.Vehicle', verbose_name='Транспортное средство')),\n migrations.AlterField(model_name='couriershift', name=\n 'vehicle_accepted_by', field=models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'vehicle_accepted_bys', related_query_name='vehicle_accepted_by',\n to=settings.AUTH_USER_MODEL, verbose_name='Принял')), migrations.\n AlterField(model_name='couriershift', name='vehicle_given_by',\n 
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.\n models.deletion.CASCADE, related_name='vehicle_given_bys',\n related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Выдал')), migrations.AlterField(model_name='district',\n name='city', field=models.ForeignKey(blank=True, null=True,\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'city_districts', related_query_name='city_district', to='api.City'\n )), migrations.AlterField(model_name='technicalservice', name=\n 'address', field=models.ForeignKey(blank=True, null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name='address_services',\n related_query_name='address_service', to='api.Address',\n verbose_name='Адрес СТО')), migrations.AlterField(model_name=\n 'vehicleservice', name='service', field=models.ForeignKey(on_delete\n =django.db.models.fields.Empty, related_name='service_vehicles',\n related_query_name='service_vehicle', to='api.TechnicalService')),\n migrations.AlterField(model_name='vehicleservice', name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty,\n related_name='vehicles', related_query_name='vehicle', to=\n 'api.Vehicle')), migrations.CreateModel(name='Order', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('deleted', models.DateTimeField(\n editable=False, null=True)), ('created_at', models.DateTimeField(\n auto_now_add=True, verbose_name='Created at')), ('updated_at',\n models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted',\n 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), (\n 'in_progress', 'Выполняется')], default='new', max_length=100,\n verbose_name='Статус заказа')), ('accepted_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время подтверждения заказа')), ('start_time', models.DateTimeField\n (blank=True, 
null=True, verbose_name=\n 'Время начала выполнения заказа')), ('end_time', models.\n DateTimeField(blank=True, null=True, verbose_name=\n 'Время завершения заказа')), ('reciever_name', models.CharField(\n blank=True, max_length=255, null=True, verbose_name=\n 'Имя получателя')), ('info', models.TextField(blank=True, null=True,\n verbose_name='Дополнительные сведения')), ('ransom_sum', models.\n DecimalField(decimal_places=2, max_digits=6, verbose_name=\n 'Сумма выкупа')), ('wait_time', models.TimeField(blank=True, null=\n True, verbose_name='Время ожидания')), ('delivery_cost', models.\n IntegerField(blank=True, null=True, verbose_name=\n 'Стоимость даставки')), ('delivery_time', models.TimeField(blank=\n True, null=True, verbose_name='Время выполнения заказа')), (\n 'courier_shift', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='courier_orders', related_query_name\n ='courier_order', to='api.CourierShift', verbose_name=\n 'Смена курьера')), ('created_by', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='orders_created_by',\n related_query_name='order_created_by', to=settings.AUTH_USER_MODEL,\n verbose_name='Кем создан')), ('customer', models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='customers_orders', related_query_name=\n 'customer_order', to=settings.AUTH_USER_MODEL, verbose_name=\n 'Клиент')), ('delivery_from', models.ForeignKey(blank=True, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'address_delivery_from', related_query_name='address_from', to=\n 'api.Address', verbose_name='Забрать от')), ('delivery_to', models.\n ForeignKey(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='address_delivery_to',\n related_query_name='address_to', to='api.Address', verbose_name=\n 'Куда доставить'))], options={'get_latest_by': '-created_at',\n 'abstract': False}), 
migrations.CreateModel(name='OperatorShift',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('deleted', models.\n DateTimeField(editable=False, null=True)), ('created_at', models.\n DateTimeField(auto_now_add=True, verbose_name='Created at')), (\n 'updated_at', models.DateTimeField(auto_now_add=True, verbose_name=\n 'Updated at')), ('start_time', models.DateField(auto_now_add=True,\n verbose_name='Начало смены')), ('end_time', models.DateField(blank=\n True, null=True, verbose_name='Конец смены')), ('operator', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='operator_shifts', related_query_name='operator_shift',\n to=settings.AUTH_USER_MODEL, verbose_name='Оператор'))], options={\n 'get_latest_by': '-created_at', 'abstract': False})]\n",
"step-5": "# Generated by Django 2.2.15 on 2020-09-16 03:20\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.db.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0005_cashiershift_couriershift_couriershiftexpenses_dailyransom_expensestype_vehicleservice'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='address',\n name='city',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='city_addresses', related_query_name='city_address', to='api.City'),\n ),\n migrations.AlterField(\n model_name='address',\n name='district',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='district_addresses', related_query_name='district_address', to='api.District'),\n ),\n migrations.AlterField(\n model_name='address',\n name='street',\n field=models.ForeignKey(max_length=255, on_delete=django.db.models.fields.Empty, related_name='street_addresses', related_query_name='street_address', to='api.Street', verbose_name='Улица'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='courier',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='couriers', related_query_name='courier', to=settings.AUTH_USER_MODEL, verbose_name='Курьер'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='vehicle',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shift_vehicles', related_query_name='shift_vehicle', to='api.Vehicle', verbose_name='Транспортное средство'),\n ),\n migrations.AlterField(\n model_name='couriershift',\n name='vehicle_accepted_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_accepted_bys', related_query_name='vehicle_accepted_by', to=settings.AUTH_USER_MODEL, verbose_name='Принял'),\n ),\n 
migrations.AlterField(\n model_name='couriershift',\n name='vehicle_given_by',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vehicle_given_bys', related_query_name='vehicle_given_by', to=settings.AUTH_USER_MODEL, verbose_name='Выдал'),\n ),\n migrations.AlterField(\n model_name='district',\n name='city',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='city_districts', related_query_name='city_district', to='api.City'),\n ),\n migrations.AlterField(\n model_name='technicalservice',\n name='address',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_services', related_query_name='address_service', to='api.Address', verbose_name='Адрес СТО'),\n ),\n migrations.AlterField(\n model_name='vehicleservice',\n name='service',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='service_vehicles', related_query_name='service_vehicle', to='api.TechnicalService'),\n ),\n migrations.AlterField(\n model_name='vehicleservice',\n name='vehicle',\n field=models.ForeignKey(on_delete=django.db.models.fields.Empty, related_name='vehicles', related_query_name='vehicle', to='api.Vehicle'),\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('deleted', models.DateTimeField(editable=False, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('status', models.CharField(choices=[('new', 'Новый'), ('accepted', 'Принят'), ('canceled', 'Отменен'), ('done', 'Завершен'), ('in_progress', 'Выполняется')], default='new', max_length=100, verbose_name='Статус заказа')),\n ('accepted_time', models.DateTimeField(blank=True, null=True, 
verbose_name='Время подтверждения заказа')),\n ('start_time', models.DateTimeField(blank=True, null=True, verbose_name='Время начала выполнения заказа')),\n ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='Время завершения заказа')),\n ('reciever_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Имя получателя')),\n ('info', models.TextField(blank=True, null=True, verbose_name='Дополнительные сведения')),\n ('ransom_sum', models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Сумма выкупа')),\n ('wait_time', models.TimeField(blank=True, null=True, verbose_name='Время ожидания')),\n ('delivery_cost', models.IntegerField(blank=True, null=True, verbose_name='Стоимость даставки')),\n ('delivery_time', models.TimeField(blank=True, null=True, verbose_name='Время выполнения заказа')),\n ('courier_shift', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courier_orders', related_query_name='courier_order', to='api.CourierShift', verbose_name='Смена курьера')),\n ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders_created_by', related_query_name='order_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Кем создан')),\n ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customers_orders', related_query_name='customer_order', to=settings.AUTH_USER_MODEL, verbose_name='Клиент')),\n ('delivery_from', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_from', related_query_name='address_from', to='api.Address', verbose_name='Забрать от')),\n ('delivery_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='address_delivery_to', related_query_name='address_to', to='api.Address', verbose_name='Куда доставить')),\n ],\n options={\n 'get_latest_by': '-created_at',\n 
'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='OperatorShift',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('deleted', models.DateTimeField(editable=False, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('updated_at', models.DateTimeField(auto_now_add=True, verbose_name='Updated at')),\n ('start_time', models.DateField(auto_now_add=True, verbose_name='Начало смены')),\n ('end_time', models.DateField(blank=True, null=True, verbose_name='Конец смены')),\n ('operator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operator_shifts', related_query_name='operator_shift', to=settings.AUTH_USER_MODEL, verbose_name='Оператор')),\n ],\n options={\n 'get_latest_by': '-created_at',\n 'abstract': False,\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3.7
import Adafruit_GPIO
import Adafruit_GPIO.I2C as I2C
import time
import sys
import argparse
import os
# Select one of the 8 downstream I2C buses behind a TCA9548A multiplexer.
# With no argument, enable each channel in turn and run i2cdetect so the
# user can see what sits behind every mux port; with a channel number,
# switch the mux to that channel and exit.
argparser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description="Select I2C channel multiplexed by TCA9548A")
# Restrict to 0-7: the TCA9548A has exactly 8 channels, and shifting by an
# out-of-range value would write a meaningless (or empty) enable mask.
argparser.add_argument('ch', nargs='?', help="channel (0-7)", type=int,
                       choices=range(8))
args = argparser.parse_args()

# 0x70 is the device address used by this setup.
TCA9548A = I2C.get_i2c_device(0x70)

if args.ch is None:
    for channel in range(8):
        print(f"== CHANNEL {channel} ==")
        TCA9548A.write8(0, 1 << channel)  # one-hot channel-enable mask
        os.system("i2cdetect -y 1")
else:
    TCA9548A.write8(0, 1 << args.ch)
|
normal
|
{
"blob_id": "46aa795bb72db0fcd588b1747e3559b8828be17c",
"index": 6927,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nargparser.add_argument('ch', nargs='?', help='channel', type=int)\n<mask token>\nif args.ch is None:\n for channel in range(0, 8):\n print(f'== CHANNEL {channel} ==')\n TCA9548A.write8(0, 1 << channel)\n os.system('i2cdetect -y 1')\nelse:\n TCA9548A.write8(0, 1 << args.ch)\n",
"step-3": "<mask token>\nargparser = argparse.ArgumentParser(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Select I2C channel multiplexed by TCA9548A')\nargparser.add_argument('ch', nargs='?', help='channel', type=int)\nargs = argparser.parse_args()\nTCA9548A = I2C.get_i2c_device(112)\nif args.ch is None:\n for channel in range(0, 8):\n print(f'== CHANNEL {channel} ==')\n TCA9548A.write8(0, 1 << channel)\n os.system('i2cdetect -y 1')\nelse:\n TCA9548A.write8(0, 1 << args.ch)\n",
"step-4": "import Adafruit_GPIO\nimport Adafruit_GPIO.I2C as I2C\nimport time\nimport sys\nimport argparse\nimport os\nargparser = argparse.ArgumentParser(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Select I2C channel multiplexed by TCA9548A')\nargparser.add_argument('ch', nargs='?', help='channel', type=int)\nargs = argparser.parse_args()\nTCA9548A = I2C.get_i2c_device(112)\nif args.ch is None:\n for channel in range(0, 8):\n print(f'== CHANNEL {channel} ==')\n TCA9548A.write8(0, 1 << channel)\n os.system('i2cdetect -y 1')\nelse:\n TCA9548A.write8(0, 1 << args.ch)\n",
"step-5": "#!/usr/bin/env python3.7\r\nimport Adafruit_GPIO\r\nimport Adafruit_GPIO.I2C as I2C\r\nimport time\r\nimport sys\r\nimport argparse\r\nimport os\r\n\r\nargparser = argparse.ArgumentParser(\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\r\n description=\"Select I2C channel multiplexed by TCA9548A\")\r\nargparser.add_argument('ch', nargs='?', help=\"channel\", type=int)\r\nargs = argparser.parse_args()\r\n\r\nTCA9548A = I2C.get_i2c_device(0x70)\r\n\r\nif args.ch is None:\r\n for channel in range(0,8):\r\n print(f\"== CHANNEL {channel} ==\")\r\n TCA9548A.write8(0, 1<<channel)\r\n os.system(\"i2cdetect -y 1\")\r\nelse:\r\n TCA9548A.write8(0, 1<<args.ch)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pointsEau.models import PointEau
from django.contrib.auth.models import User
from rest_framework import serializers
class PointEauSerializer(serializers.ModelSerializer):
    """Serializer for PointEau (water point) records.

    Exposes the point's name, WGS84 coordinates, free-text description and
    the owning user's username (read-only, filled in server-side).
    """

    class Meta:
        model = PointEau
        fields = [
            'pk',
            'nom',
            'lat',
            'long',
            'desc',
            'owner'
        ]
    nom = serializers.CharField(max_length=100)
    # Longitude spans [-180, 180]: 3 integer digits + 8 decimal places need
    # max_digits=11. The previous max_digits=10 left only 2 integer digits
    # and rejected any longitude with |value| >= 100 (e.g. 120.5).
    # NOTE(review): confirm the PointEau.long model field accepts 11 digits.
    long = serializers.DecimalField(max_digits=11, decimal_places=8)
    # Latitude spans [-90, 90]: 2 integer digits + 8 decimal places fit in 10.
    lat = serializers.DecimalField(max_digits=10, decimal_places=8)
    desc = serializers.CharField(max_length=255)
    # Read-only: derived from the related User, never written by clients.
    owner = serializers.ReadOnlyField(source='owner.username')
class UserSerializer(serializers.ModelSerializer):
    """Serialize a User together with the primary keys of their PointEau records."""

    # Reverse relation: list of PointEau primary keys linked to this user.
    pointseau = serializers.PrimaryKeyRelatedField(
        queryset=PointEau.objects.all(),
        many=True,
    )

    class Meta:
        model = User
        fields = ('id', 'username', 'pointseau')
|
normal
|
{
"blob_id": "51f171b3847b3dbf5657625fdf3b7fe771e0e004",
"index": 4743,
"step-1": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-2": "<mask token>\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-3": "<mask token>\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-4": "from pointsEau.models import PointEau\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-5": "from pointsEau.models import PointEau\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n class Meta:\n model = PointEau\n fields = [\n 'pk',\n 'nom',\n 'lat',\n 'long',\n 'desc',\n 'owner'\n ]\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=PointEau.objects.all())\n\n class Meta:\n model = User\n fields = ('id', 'username', 'pointseau')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/')
def index():
list = []
creds = config.get_ec2_conf()
for region in config.region_list():
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
zones = conn.get_all_zones()
instances = conn.get_all_instance_status()
instance_count = len(instances)
ebs = conn.get_all_volumes()
ebscount = len(ebs)
unattached_ebs = 0
unattached_eli = 0
event_count = 0
for instance in instances:
events = instance.events
if events:
event_count = event_count + 1
for vol in ebs:
state = vol.attachment_state()
if state == None:
unattached_ebs = unattached_ebs + 1
elis = conn.get_all_addresses()
eli_count = len(elis)
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
unattached_eli = unattached_eli + 1
connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=
creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elb = connelb.get_all_load_balancers()
elb_count = len(elb)
list.append({'region': region, 'zones': zones, 'instance_count':
instance_count, 'ebscount': ebscount, 'unattached_ebs':
unattached_ebs, 'eli_count': eli_count, 'unattached_eli':
unattached_eli, 'elb_count': elb_count, 'event_count': event_count}
)
return render_template('index.html', list=list)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ebs = conn.get_all_volumes()
ebs_vol = []
for vol in ebs:
state = vol.attachment_state()
if state == None:
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,
'status': vol.status}
ebs_vol.append(ebs_info)
return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
vol_id = vol_id.encode('ascii')
vol_ids = conn.get_all_volumes(volume_ids=vol_id)
for vol in vol_ids:
vol.delete()
return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elis = conn.get_all_addresses()
un_eli = []
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}
un_eli.append(eli_info)
return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ip = ip.encode('ascii')
elis = conn.get_all_addresses(addresses=ip)
for eli in elis:
eli.release()
return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
instances = conn.get_all_instance_status()
instance_event_list = []
for instance in instances:
event = instance.events
if event:
event_info = {'instance_id': instance.id, 'event': instance.
events[0].code, 'description': instance.events[0].
description, 'event_before': instance.events[0].not_before,
'event_after': instance.events[0].not_after}
instance_event_list.append(event_info)
return render_template('instance_events.html', instance_event_list=
instance_event_list)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
list = []
creds = config.get_ec2_conf()
for region in config.region_list():
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
zones = conn.get_all_zones()
instances = conn.get_all_instance_status()
instance_count = len(instances)
ebs = conn.get_all_volumes()
ebscount = len(ebs)
unattached_ebs = 0
unattached_eli = 0
event_count = 0
for instance in instances:
events = instance.events
if events:
event_count = event_count + 1
for vol in ebs:
state = vol.attachment_state()
if state == None:
unattached_ebs = unattached_ebs + 1
elis = conn.get_all_addresses()
eli_count = len(elis)
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
unattached_eli = unattached_eli + 1
connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=
creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elb = connelb.get_all_load_balancers()
elb_count = len(elb)
list.append({'region': region, 'zones': zones, 'instance_count':
instance_count, 'ebscount': ebscount, 'unattached_ebs':
unattached_ebs, 'eli_count': eli_count, 'unattached_eli':
unattached_eli, 'elb_count': elb_count, 'event_count': event_count}
)
return render_template('index.html', list=list)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ebs = conn.get_all_volumes()
ebs_vol = []
for vol in ebs:
state = vol.attachment_state()
if state == None:
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,
'status': vol.status}
ebs_vol.append(ebs_info)
return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
vol_id = vol_id.encode('ascii')
vol_ids = conn.get_all_volumes(volume_ids=vol_id)
for vol in vol_ids:
vol.delete()
return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elis = conn.get_all_addresses()
un_eli = []
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}
un_eli.append(eli_info)
return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ip = ip.encode('ascii')
elis = conn.get_all_addresses(addresses=ip)
for eli in elis:
eli.release()
return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
instances = conn.get_all_instance_status()
instance_event_list = []
for instance in instances:
event = instance.events
if event:
event_info = {'instance_id': instance.id, 'event': instance.
events[0].code, 'description': instance.events[0].
description, 'event_before': instance.events[0].not_before,
'event_after': instance.events[0].not_after}
instance_event_list.append(event_info)
return render_template('instance_events.html', instance_event_list=
instance_event_list)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
list = []
creds = config.get_ec2_conf()
for region in config.region_list():
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
zones = conn.get_all_zones()
instances = conn.get_all_instance_status()
instance_count = len(instances)
ebs = conn.get_all_volumes()
ebscount = len(ebs)
unattached_ebs = 0
unattached_eli = 0
event_count = 0
for instance in instances:
events = instance.events
if events:
event_count = event_count + 1
for vol in ebs:
state = vol.attachment_state()
if state == None:
unattached_ebs = unattached_ebs + 1
elis = conn.get_all_addresses()
eli_count = len(elis)
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
unattached_eli = unattached_eli + 1
connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=
creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elb = connelb.get_all_load_balancers()
elb_count = len(elb)
list.append({'region': region, 'zones': zones, 'instance_count':
instance_count, 'ebscount': ebscount, 'unattached_ebs':
unattached_ebs, 'eli_count': eli_count, 'unattached_eli':
unattached_eli, 'elb_count': elb_count, 'event_count': event_count}
)
return render_template('index.html', list=list)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ebs = conn.get_all_volumes()
ebs_vol = []
for vol in ebs:
state = vol.attachment_state()
if state == None:
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,
'status': vol.status}
ebs_vol.append(ebs_info)
return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
vol_id = vol_id.encode('ascii')
vol_ids = conn.get_all_volumes(volume_ids=vol_id)
for vol in vol_ids:
vol.delete()
return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elis = conn.get_all_addresses()
un_eli = []
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}
un_eli.append(eli_info)
return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ip = ip.encode('ascii')
elis = conn.get_all_addresses(addresses=ip)
for eli in elis:
eli.release()
return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
instances = conn.get_all_instance_status()
instance_event_list = []
for instance in instances:
event = instance.events
if event:
event_info = {'instance_id': instance.id, 'event': instance.
events[0].code, 'description': instance.events[0].
description, 'event_before': instance.events[0].not_before,
'event_after': instance.events[0].not_after}
instance_event_list.append(event_info)
return render_template('instance_events.html', instance_event_list=
instance_event_list)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
<|reserved_special_token_1|>
from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response
import os, sys
import config
import boto.ec2.elb
import boto
from boto.ec2 import *
app = Flask(__name__)
@app.route('/')
def index():
list = []
creds = config.get_ec2_conf()
for region in config.region_list():
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
zones = conn.get_all_zones()
instances = conn.get_all_instance_status()
instance_count = len(instances)
ebs = conn.get_all_volumes()
ebscount = len(ebs)
unattached_ebs = 0
unattached_eli = 0
event_count = 0
for instance in instances:
events = instance.events
if events:
event_count = event_count + 1
for vol in ebs:
state = vol.attachment_state()
if state == None:
unattached_ebs = unattached_ebs + 1
elis = conn.get_all_addresses()
eli_count = len(elis)
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
unattached_eli = unattached_eli + 1
connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=
creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elb = connelb.get_all_load_balancers()
elb_count = len(elb)
list.append({'region': region, 'zones': zones, 'instance_count':
instance_count, 'ebscount': ebscount, 'unattached_ebs':
unattached_ebs, 'eli_count': eli_count, 'unattached_eli':
unattached_eli, 'elb_count': elb_count, 'event_count': event_count}
)
return render_template('index.html', list=list)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ebs = conn.get_all_volumes()
ebs_vol = []
for vol in ebs:
state = vol.attachment_state()
if state == None:
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,
'status': vol.status}
ebs_vol.append(ebs_info)
return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
vol_id = vol_id.encode('ascii')
vol_ids = conn.get_all_volumes(volume_ids=vol_id)
for vol in vol_ids:
vol.delete()
return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elis = conn.get_all_addresses()
un_eli = []
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}
un_eli.append(eli_info)
return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ip = ip.encode('ascii')
elis = conn.get_all_addresses(addresses=ip)
for eli in elis:
eli.release()
return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
instances = conn.get_all_instance_status()
instance_event_list = []
for instance in instances:
event = instance.events
if event:
event_info = {'instance_id': instance.id, 'event': instance.
events[0].code, 'description': instance.events[0].
description, 'event_before': instance.events[0].not_before,
'event_after': instance.events[0].not_after}
instance_event_list.append(event_info)
return render_template('instance_events.html', instance_event_list=
instance_event_list)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
|
flexible
|
{
"blob_id": "22c2425f1dc14b6b0005ebf2231af8abf43aa2e1",
"index": 5273,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\n@app.route('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, 
region=region)\n\n\n@app.route('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\n@app.route('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\n@app.route('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\n@app.route('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': 
instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\n@app.route('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, 
region=region)\n\n\n@app.route('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\n@app.route('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\n@app.route('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\n@app.route('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': 
instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\n@app.route('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, 
region=region)\n\n\n@app.route('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\n@app.route('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\n@app.route('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\n@app.route('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': 
instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-4": "from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response\nimport os, sys\nimport config\nimport boto.ec2.elb\nimport boto\nfrom boto.ec2 import *\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\n@app.route('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n 
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\n@app.route('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\n@app.route('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\n@app.route('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\n@app.route('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if 
event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
from random import choice
SECRET_KEY = ''.join([choice(
'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in
range(50)])
secret = file(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
Exception(
'Please create a %s file with random characters to generate your secret key!'
% SECRET_FILE)
<|reserved_special_token_0|>
try:
from local_settings import *
except:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
from random import choice
SECRET_KEY = ''.join([choice(
'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in
range(50)])
secret = file(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
Exception(
'Please create a %s file with random characters to generate your secret key!'
% SECRET_FILE)
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'django.contrib.gis', 'leaflet', 'cts', 'wards', 'bmc')
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware')
ROOT_URLCONF = 'geobombay.urls'
WSGI_APPLICATION = 'geobombay.wsgi.application'
DATABASES = {'default': {'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geobombay'}}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')
STATICFILES_DIRS = os.path.join(BASE_DIR, 'assets', 'static'),
TEMPLATE_DIRS = os.path.join(BASE_DIR, 'templates'),
TEMPLATE_CONTEXT_PROCESSORS = TCP + ('django.core.context_processors.request',)
LEAFLET_CONFIG = {'DEFAULT_CENTER': (19, 72.85521), 'DEFAULT_ZOOM': 11}
try:
from local_settings import *
except:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
from random import choice
SECRET_KEY = ''.join([choice(
'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in
range(50)])
secret = file(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
Exception(
'Please create a %s file with random characters to generate your secret key!'
% SECRET_FILE)
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'django.contrib.gis', 'leaflet', 'cts', 'wards', 'bmc')
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware')
ROOT_URLCONF = 'geobombay.urls'
WSGI_APPLICATION = 'geobombay.wsgi.application'
DATABASES = {'default': {'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geobombay'}}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')
STATICFILES_DIRS = os.path.join(BASE_DIR, 'assets', 'static'),
TEMPLATE_DIRS = os.path.join(BASE_DIR, 'templates'),
TEMPLATE_CONTEXT_PROCESSORS = TCP + ('django.core.context_processors.request',)
LEAFLET_CONFIG = {'DEFAULT_CENTER': (19, 72.85521), 'DEFAULT_ZOOM': 11}
try:
from local_settings import *
except:
pass
<|reserved_special_token_1|>
"""
Django settings for geobombay project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
try:
SECRET_KEY
except NameError:
SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
from random import choice
SECRET_KEY = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
secret = file(SECRET_FILE, 'w')
secret.write(SECRET_KEY)
secret.close()
except IOError:
Exception('Please create a %s file with random characters to generate your secret key!' % SECRET_FILE)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
#'suit', #Django Suit, skin for the admin
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'leaflet',
'cts',
'wards',
'bmc',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geobombay.urls'
WSGI_APPLICATION = 'geobombay.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geobombay'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets', 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
#Global map / leaflet settings (for django-leaflet plugin we use for admin)
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (19, 72.85521,),
'DEFAULT_ZOOM': 11,
}
try:
from local_settings import *
except:
pass
|
flexible
|
{
"blob_id": "32ca107fde4c98b61d85f6648f30c7601b31c7f3",
"index": 3182,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n SECRET_KEY\nexcept NameError:\n SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')\n try:\n SECRET_KEY = open(SECRET_FILE).read().strip()\n except IOError:\n try:\n from random import choice\n SECRET_KEY = ''.join([choice(\n 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in\n range(50)])\n secret = file(SECRET_FILE, 'w')\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception(\n 'Please create a %s file with random characters to generate your secret key!'\n % SECRET_FILE)\n<mask token>\ntry:\n from local_settings import *\nexcept:\n pass\n",
"step-3": "<mask token>\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ntry:\n SECRET_KEY\nexcept NameError:\n SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')\n try:\n SECRET_KEY = open(SECRET_FILE).read().strip()\n except IOError:\n try:\n from random import choice\n SECRET_KEY = ''.join([choice(\n 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in\n range(50)])\n secret = file(SECRET_FILE, 'w')\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception(\n 'Please create a %s file with random characters to generate your secret key!'\n % SECRET_FILE)\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'django.contrib.gis', 'leaflet', 'cts', 'wards', 'bmc')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nROOT_URLCONF = 'geobombay.urls'\nWSGI_APPLICATION = 'geobombay.wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'geobombay'}}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')\nSTATICFILES_DIRS = os.path.join(BASE_DIR, 'assets', 'static'),\nTEMPLATE_DIRS = os.path.join(BASE_DIR, 'templates'),\nTEMPLATE_CONTEXT_PROCESSORS = TCP + ('django.core.context_processors.request',)\nLEAFLET_CONFIG = {'DEFAULT_CENTER': (19, 72.85521), 'DEFAULT_ZOOM': 
11}\ntry:\n from local_settings import *\nexcept:\n pass\n",
"step-4": "<mask token>\nimport os\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ntry:\n SECRET_KEY\nexcept NameError:\n SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')\n try:\n SECRET_KEY = open(SECRET_FILE).read().strip()\n except IOError:\n try:\n from random import choice\n SECRET_KEY = ''.join([choice(\n 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in\n range(50)])\n secret = file(SECRET_FILE, 'w')\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception(\n 'Please create a %s file with random characters to generate your secret key!'\n % SECRET_FILE)\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.messages', 'django.contrib.staticfiles',\n 'django.contrib.gis', 'leaflet', 'cts', 'wards', 'bmc')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nROOT_URLCONF = 'geobombay.urls'\nWSGI_APPLICATION = 'geobombay.wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'geobombay'}}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')\nSTATICFILES_DIRS = os.path.join(BASE_DIR, 'assets', 'static'),\nTEMPLATE_DIRS = os.path.join(BASE_DIR, 'templates'),\nTEMPLATE_CONTEXT_PROCESSORS = TCP + 
('django.core.context_processors.request',)\nLEAFLET_CONFIG = {'DEFAULT_CENTER': (19, 72.85521), 'DEFAULT_ZOOM': 11}\ntry:\n from local_settings import *\nexcept:\n pass\n",
"step-5": "\"\"\"\nDjango settings for geobombay project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nDATA_DIR = os.path.join(BASE_DIR, 'data')\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\ntry:\n SECRET_KEY\nexcept NameError:\n SECRET_FILE = os.path.join(BASE_DIR, 'secret.txt')\n try:\n SECRET_KEY = open(SECRET_FILE).read().strip()\n except IOError:\n try:\n from random import choice\n SECRET_KEY = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])\n secret = file(SECRET_FILE, 'w')\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n Exception('Please create a %s file with random characters to generate your secret key!' 
% SECRET_FILE)\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n #'suit', #Django Suit, skin for the admin\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'leaflet',\n 'cts',\n 'wards',\n 'bmc',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'geobombay.urls'\n\nWSGI_APPLICATION = 'geobombay.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.contrib.gis.db.backends.postgis',\n 'NAME': 'geobombay'\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'assets', 'collected-static')\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'assets', 'static'),\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates'),\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = TCP + (\n 
'django.core.context_processors.request',\n)\n\n#Global map / leaflet settings (for django-leaflet plugin we use for admin)\nLEAFLET_CONFIG = {\n 'DEFAULT_CENTER': (19, 72.85521,),\n 'DEFAULT_ZOOM': 11,\n}\n\ntry:\n from local_settings import *\nexcept:\n pass\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(number)
<|reserved_special_token_0|>
print(c)
<|reserved_special_token_0|>
print(word1, ' ' + word2, ' ' + word3)
<|reserved_special_token_0|>
print(word[:4])
<|reserved_special_token_1|>
number = (30 * 39 + 300) ** 10
print(number)
x = 0.005
y = 0.1
c = x + y
print(c)
<|reserved_special_token_0|>
word1 = 'the study or use of the systems'
word2 = 'especially computers and communications'
word3 = 'for storing, retrieving, and sending information'
print(word1, ' ' + word2, ' ' + word3)
word = 'Mystery'
print(word[:4])
<|reserved_special_token_1|>
# 1 use the operators to solve for the following equation:
# (a)
number = ((30*39) + 300) **10
print(number)
# find the value of C. X + Y = C Given:
x = 0.0050
y = 0.1000
c = x + y
print(c)
"""
what is the result of the following:
(a) take the sentence:
the study or use of the systems
(especially computers and communications)
for storing, retrieving, and sending information
"""
"""
strore each word in a separate variable, then print out the sentence on the one line using the print function
"""
word1 = "the study or use of the systems"
word2 = "especially computers and communications"
word3 = "for storing, retrieving, and sending information"
print(word1, " " + word2, " " + word3)
# (b) what is output ?
word = "Mystery"
print(word[:4])
|
flexible
|
{
"blob_id": "c2f82cf73d095979d1da346b7dd7779bcc675805",
"index": 4045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(number)\n<mask token>\nprint(c)\n<mask token>\nprint(word1, ' ' + word2, ' ' + word3)\n<mask token>\nprint(word[:4])\n",
"step-3": "number = (30 * 39 + 300) ** 10\nprint(number)\nx = 0.005\ny = 0.1\nc = x + y\nprint(c)\n<mask token>\nword1 = 'the study or use of the systems'\nword2 = 'especially computers and communications'\nword3 = 'for storing, retrieving, and sending information'\nprint(word1, ' ' + word2, ' ' + word3)\nword = 'Mystery'\nprint(word[:4])\n",
"step-4": "# 1 use the operators to solve for the following equation:\n# (a) \nnumber = ((30*39) + 300) **10\nprint(number)\n\n# find the value of C. X + Y = C Given:\nx = 0.0050\ny = 0.1000\n\nc = x + y\nprint(c)\n\n\"\"\"\n what is the result of the following:\n (a) take the sentence:\n the study or use of the systems\n (especially computers and communications)\n for storing, retrieving, and sending information\n\"\"\"\n\"\"\"\nstrore each word in a separate variable, then print out the sentence on the one line using the print function\n\"\"\"\nword1 = \"the study or use of the systems\"\nword2 = \"especially computers and communications\"\nword3 = \"for storing, retrieving, and sending information\"\nprint(word1, \" \" + word2, \" \" + word3)\n\n# (b) what is output ?\n\nword = \"Mystery\"\nprint(word[:4])\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import dataset
import json
import gc
import os
jsonDir = "/home/jr/share/python/music-visualizer/merged"
db = dataset.connect('sqlite:///test.db')
table = db['Songs']
for root, subFolders, files in os.walk(jsonDir):
for f in files:
print("file:{}".format(f))
gc.collect()
tmpJson = json.load(open(os.path.join(root, f)))
for Artist in tmpJson:
for song in tmpJson[Artist]["Songs"]:
table.insert(song)
import urllib2
import json
import re
#in_artist
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
nodes = []
edges = []
anchor = "Rihanna"
q = [anchor]
while len(q) > 0:
art = q.pop(0)
#get song list
url = "http://10.104.246.185:5000/artist/"+art.replace(" ", "%20")
response = urllib2.urlopen(url)
dictionary = byteify(json.loads(response.read()))
songlist = []
if (dictionary):
lst = dictionary["Songs"]
for song in lst:
songlist.append(song["Title"])
for song in songlist:
#get string of featured artists
m = re.match('.+[fF]eat. ([^)(/]+)', song)
if m:
s = m.group(1)
#split into artists
lst = s.split(",")
lstend = (lst.pop()).split("&")
lst.extend(lstend)
for a in lst:
a = a.strip()
edges.append((art.strip(),a))
if nodes.count(a) == 0:
q.append(a)
for b in lst:
b = b.strip()
if a != b:
edges.append((a,b))
if nodes.count(art) == 0:
nodes.append(art.strip())
i = 0
j = 0
while i < len(edges)-1:
j = i+1
t1 = edges[i]
while j < len(edges):
t2 = edges[j]
if t1[0] == t2[0] and t1[1] == t2[1]:
edges.pop(j)
elif t1[1] == t2[0] and t1[0] == t2[1]:
edges.pop(j)
elif t2[0] == t2[1]:
edges.pop(j)
else:
j = j + 1
i = i + 1
print nodes
print edges
|
normal
|
{
"blob_id": "3461e9dceb2c0bfc49002809154f8be4cd8c66e2",
"index": 1483,
"step-1": "import dataset\nimport json\nimport gc\nimport os\n\njsonDir = \"/home/jr/share/python/music-visualizer/merged\"\n\ndb = dataset.connect('sqlite:///test.db')\ntable = db['Songs']\n\nfor root, subFolders, files in os.walk(jsonDir):\n for f in files:\n print(\"file:{}\".format(f))\n gc.collect()\n tmpJson = json.load(open(os.path.join(root, f)))\n for Artist in tmpJson:\n for song in tmpJson[Artist][\"Songs\"]:\n table.insert(song)\nimport urllib2\nimport json\nimport re\n#in_artist\ndef byteify(input):\n if isinstance(input, dict):\n return {byteify(key): byteify(value) for key, value in input.iteritems()}\n elif isinstance(input, list):\n return [byteify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input\n\nnodes = []\nedges = []\n\nanchor = \"Rihanna\"\n\nq = [anchor]\n\nwhile len(q) > 0:\n art = q.pop(0)\n #get song list\n url = \"http://10.104.246.185:5000/artist/\"+art.replace(\" \", \"%20\")\n response = urllib2.urlopen(url)\n dictionary = byteify(json.loads(response.read()))\n songlist = []\n if (dictionary):\n lst = dictionary[\"Songs\"]\n for song in lst:\n songlist.append(song[\"Title\"])\n for song in songlist:\n #get string of featured artists\n m = re.match('.+[fF]eat. ([^)(/]+)', song) \n if m:\n s = m.group(1)\n #split into artists\n lst = s.split(\",\")\n lstend = (lst.pop()).split(\"&\")\n lst.extend(lstend)\n for a in lst:\n a = a.strip()\n edges.append((art.strip(),a))\n if nodes.count(a) == 0:\n q.append(a)\n for b in lst:\n b = b.strip()\n if a != b:\n edges.append((a,b))\n\n if nodes.count(art) == 0:\n nodes.append(art.strip())\n\ni = 0\nj = 0\nwhile i < len(edges)-1:\n j = i+1\n t1 = edges[i]\n while j < len(edges):\n t2 = edges[j]\n if t1[0] == t2[0] and t1[1] == t2[1]:\n edges.pop(j)\n elif t1[1] == t2[0] and t1[0] == t2[1]:\n edges.pop(j)\n elif t2[0] == t2[1]:\n edges.pop(j)\n else:\n j = j + 1\n \n i = i + 1\nprint nodes\nprint edges\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from .score_funcs import *
from cryptonita.fuzzy_set import FuzzySet
from cryptonita.helpers import are_bytes_or_fail
def scoring(msg, space, score_func, min_score=0.5, **score_func_params):
''' Run the score function over the given message and over a parametric
value x. Return all the values x as a FuzzySet (guess)
which scores is greather than the minimum score.
The parametric space <space> can is defined as:
- a range object
- or any other iterable of the parametric values x
For each possible x, score each using <score_func> and
drop anyone with a score of <min_score> or less.
Extra parameters can be passed to the <score_func> using
<score_func_params>.
Return a FuzzySet with the x values.
'''
assert 0.0 <= min_score <= 1.0
are_bytes_or_fail(msg, 'msg')
params = score_func_params
lengths = FuzzySet(
((x, score_func(msg, x, **params)) for x in space),
pr='tuple',
min_membership=min_score
)
return lengths
|
normal
|
{
"blob_id": "99048ddb3f42382c8b8b435d832a45011a031cf1",
"index": 8537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-3": "from .score_funcs import *\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n \"\"\" Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n \"\"\"\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n params = score_func_params\n lengths = FuzzySet(((x, score_func(msg, x, **params)) for x in space),\n pr='tuple', min_membership=min_score)\n return lengths\n",
"step-4": "from .score_funcs import *\n\nfrom cryptonita.fuzzy_set import FuzzySet\nfrom cryptonita.helpers import are_bytes_or_fail\n\n\ndef scoring(msg, space, score_func, min_score=0.5, **score_func_params):\n ''' Run the score function over the given message and over a parametric\n value x. Return all the values x as a FuzzySet (guess)\n which scores is greather than the minimum score.\n\n The parametric space <space> can is defined as:\n - a range object\n - or any other iterable of the parametric values x\n\n For each possible x, score each using <score_func> and\n drop anyone with a score of <min_score> or less.\n\n Extra parameters can be passed to the <score_func> using\n <score_func_params>.\n\n Return a FuzzySet with the x values.\n '''\n assert 0.0 <= min_score <= 1.0\n are_bytes_or_fail(msg, 'msg')\n\n params = score_func_params\n lengths = FuzzySet(\n ((x, score_func(msg, x, **params)) for x in space),\n pr='tuple',\n min_membership=min_score\n )\n return lengths\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 19 09:41:08 2018
hexatrigesimal to decimal calculator,
base 36 encoding; use of letters with digits.
@author: susan
"""
## create a dictionary as reference for BASE 36 calculations
WORD = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" # digits of BASE 36
BASE = {}
for i, item in enumerate(WORD): # iterate through word
BASE.update({WORD[i]:i}) # update BASE dictionary with key:value pair
# input function, BASE 36 numbers for calculations.
def enter_num():
""" get user input and do error checking for illegal digits.
returns
-------
num
"""
num = input("please enter a BASE 36 number, e.g. A36Z :> ")
num = num.upper()
for digit in num:
digit = digit.upper()
if digit not in WORD:
print(" **error** user input failed\n")
print("do you want to re enter number")
ans = input("y or n ")
ans = ans.upper()
if ans == "Y":
num = enter_num()
else:
num = None
return num
# make list function.
def mk_num_lst(num):
""" make BASE 36 number from user into a list.
reverse list so digit are read left to right.
returns
-------
num_lst
"""
num_lst = []
for digit in num:
num_lst.append(digit)
num_lst.reverse()
return num_lst
# convert function.
def convert(num_lst):
""" convert each digit to power of 36 appropriately.
prints result in decimal.
returns
-------
dec
"""
dec = 0
for i in range(0, len(num_lst)):
print("position right to left is >", i+1,
"value is ", BASE[(num_lst[i])],
"decimal value is",
(36**i) * BASE[(num_lst[i])])
dec += (36**i) * BASE[(num_lst[i])]
return dec
# main program flow function.
def main():
"""
process valid user input or
terminate program on failed input.
"""
num = enter_num()
if num is not None:
num_lst = mk_num_lst(num)
dec = convert(num_lst)
print("decimal value of BASE 36 number", num, "is", dec)
else:
print("user terminated program")
# program start.
main()
|
normal
|
{
"blob_id": "5a265ecb9f1d6d0e4a5c66d241fbfe4a6df97825",
"index": 8191,
"step-1": "<mask token>\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\n<mask token>\n",
"step-3": "<mask token>\nfor i, item in enumerate(WORD):\n BASE.update({WORD[i]: i})\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\nmain()\n",
"step-4": "<mask token>\nWORD = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nBASE = {}\nfor i, item in enumerate(WORD):\n BASE.update({WORD[i]: i})\n\n\ndef enter_num():\n \"\"\" get user input and do error checking for illegal digits.\n returns\n -------\n num\n \"\"\"\n num = input('please enter a BASE 36 number, e.g. A36Z :> ')\n num = num.upper()\n for digit in num:\n digit = digit.upper()\n if digit not in WORD:\n print(' **error** user input failed\\n')\n print('do you want to re enter number')\n ans = input('y or n ')\n ans = ans.upper()\n if ans == 'Y':\n num = enter_num()\n else:\n num = None\n return num\n\n\ndef mk_num_lst(num):\n \"\"\" make BASE 36 number from user into a list.\n reverse list so digit are read left to right.\n returns\n -------\n num_lst\n \"\"\"\n num_lst = []\n for digit in num:\n num_lst.append(digit)\n num_lst.reverse()\n return num_lst\n\n\ndef convert(num_lst):\n \"\"\" convert each digit to power of 36 appropriately.\n prints result in decimal.\n returns\n -------\n dec\n \"\"\"\n dec = 0\n for i in range(0, len(num_lst)):\n print('position right to left is >', i + 1, 'value is ', BASE[\n num_lst[i]], 'decimal value is', 36 ** i * BASE[num_lst[i]])\n dec += 36 ** i * BASE[num_lst[i]]\n return dec\n\n\ndef main():\n \"\"\"\n process valid user input or\n terminate program on failed input.\n \"\"\"\n num = enter_num()\n if num is not None:\n num_lst = mk_num_lst(num)\n dec = convert(num_lst)\n print('decimal value of BASE 36 number', num, 'is', dec)\n else:\n print('user terminated program')\n\n\nmain()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 19 09:41:08 2018\r\nhexatrigesimal to decimal calculator,\r\nbase 36 encoding; use of letters with digits.\r\n@author: susan\r\n\"\"\"\r\n## create a dictionary as reference for BASE 36 calculations\r\nWORD = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\" # digits of BASE 36\r\nBASE = {}\r\nfor i, item in enumerate(WORD): # iterate through word\r\n BASE.update({WORD[i]:i}) # update BASE dictionary with key:value pair\r\n# input function, BASE 36 numbers for calculations.\r\ndef enter_num():\r\n \"\"\" get user input and do error checking for illegal digits.\r\n returns\r\n -------\r\n num\r\n \"\"\"\r\n num = input(\"please enter a BASE 36 number, e.g. A36Z :> \")\r\n num = num.upper()\r\n for digit in num:\r\n digit = digit.upper()\r\n if digit not in WORD:\r\n print(\" **error** user input failed\\n\")\r\n print(\"do you want to re enter number\")\r\n ans = input(\"y or n \")\r\n ans = ans.upper()\r\n if ans == \"Y\":\r\n num = enter_num()\r\n else:\r\n num = None\r\n return num\r\n# make list function.\r\ndef mk_num_lst(num):\r\n \"\"\" make BASE 36 number from user into a list.\r\n reverse list so digit are read left to right.\r\n returns\r\n -------\r\n num_lst\r\n \"\"\"\r\n num_lst = []\r\n for digit in num:\r\n num_lst.append(digit)\r\n num_lst.reverse()\r\n return num_lst\r\n# convert function.\r\ndef convert(num_lst):\r\n \"\"\" convert each digit to power of 36 appropriately.\r\n prints result in decimal.\r\n returns\r\n -------\r\n dec\r\n \"\"\"\r\n dec = 0\r\n for i in range(0, len(num_lst)):\r\n print(\"position right to left is >\", i+1,\r\n \"value is \", BASE[(num_lst[i])],\r\n \"decimal value is\",\r\n (36**i) * BASE[(num_lst[i])])\r\n dec += (36**i) * BASE[(num_lst[i])]\r\n return dec\r\n# main program flow function.\r\ndef main():\r\n \"\"\"\r\n process valid user input or\r\n terminate program on failed input.\r\n \"\"\"\r\n num = enter_num()\r\n if num is not None:\r\n num_lst = 
mk_num_lst(num)\r\n dec = convert(num_lst)\r\n print(\"decimal value of BASE 36 number\", num, \"is\", dec)\r\n else:\r\n print(\"user terminated program\")\r\n# program start.\r\nmain()\r\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from django.urls import path,include
from.import views
from user.views import DetailsChangeView, HomeView, PasswordChangeView,SignUpView,LoginView,SettingsView,LogoutView,CreatePostView,CommentPostView,PasswordChangeView
urlpatterns = [
path('', HomeView.as_view(), name = 'HomeView'),
path('LoginView/', LoginView.as_view(), name = 'LoginView'),
path('SignUpView/',SignUpView.as_view(), name = 'SignUpView' ),
path('SettingsView/', SettingsView.as_view(), name = 'SettingsView'),
path('LogoutView/', LogoutView.as_view(), name = 'LogoutView'),
path('social_auth/', include('social_django.urls', namespace = 'social')),
path('users_list/', views.users_list, name = 'users_list'),
path('CreatePostView/', CreatePostView.as_view(), name = 'CreatePostView'),
path('like/<int:id>/', views.like , name = 'like'),
path('CommentPostView/<int:id>/', CommentPostView.as_view(), name = 'CommentPostView'),
path('follow/<int:id>/', views.follow , name = 'follow'),
path('followback/<int:id>/', views.followback, name = 'followback'),
path('delete_request/<int:id>/',views.delete_request, name = 'delete_request'),
path('unfriend/<int:id>/', views.unfriend, name = 'unfriend'),
path('friendslist/<int:id>/',views.friendslist, name = 'friendslist'),
# path('FollowListView/<int:id>/',FollowListView.as_view(), name = 'FollowListView')
path('PasswordChangeView/', PasswordChangeView.as_view(), name = 'PasswordChangeView'),
path('DetailsChangeView/', DetailsChangeView.as_view(), name= 'DetailsChangeView'),
path('user_profile_view/<int:id>/',views.user_profile_view, name = 'user_profile_view'),
path('start_chat/<int:id>/', views.start_chat, name= 'start_chat'),
path('search_function/', views.search_function, name='search_function')
]
|
normal
|
{
"blob_id": "5bd8cee2595215fda6ab523a646cf918e3d84a50",
"index": 937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"step-3": "from django.urls import path, include\nfrom . import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView, SignUpView, LoginView, SettingsView, LogoutView, CreatePostView, CommentPostView, PasswordChangeView\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"step-4": "from django.urls import path,include\nfrom.import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView,SignUpView,LoginView,SettingsView,LogoutView,CreatePostView,CommentPostView,PasswordChangeView\n\nurlpatterns = [\n path('', HomeView.as_view(), name = 'HomeView'),\n path('LoginView/', LoginView.as_view(), name = 'LoginView'),\n path('SignUpView/',SignUpView.as_view(), name = 'SignUpView' ),\n path('SettingsView/', SettingsView.as_view(), name = 'SettingsView'),\n path('LogoutView/', LogoutView.as_view(), name = 'LogoutView'),\n path('social_auth/', include('social_django.urls', namespace = 'social')),\n path('users_list/', views.users_list, name = 'users_list'),\n path('CreatePostView/', CreatePostView.as_view(), name = 'CreatePostView'),\n path('like/<int:id>/', views.like , name = 'like'),\n path('CommentPostView/<int:id>/', CommentPostView.as_view(), name = 'CommentPostView'),\n path('follow/<int:id>/', views.follow , name = 'follow'),\n path('followback/<int:id>/', views.followback, name = 'followback'),\n path('delete_request/<int:id>/',views.delete_request, name = 'delete_request'),\n path('unfriend/<int:id>/', views.unfriend, name = 'unfriend'),\n path('friendslist/<int:id>/',views.friendslist, name = 'friendslist'),\n # path('FollowListView/<int:id>/',FollowListView.as_view(), name = 'FollowListView')\n path('PasswordChangeView/', PasswordChangeView.as_view(), name = 'PasswordChangeView'),\n path('DetailsChangeView/', DetailsChangeView.as_view(), name= 'DetailsChangeView'),\n path('user_profile_view/<int:id>/',views.user_profile_view, name = 'user_profile_view'),\n path('start_chat/<int:id>/', views.start_chat, name= 'start_chat'),\n path('search_function/', views.search_function, name='search_function')\n \n \n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(id(x))
print(id(y))
print()
<|reserved_special_token_0|>
print(id(x))
print(id(y))
print()
<|reserved_special_token_0|>
print(z)
print(w)
print(id(z))
print(id(w))
print()
<|reserved_special_token_0|>
print(z)
print(w)
print(id(z))
print(id(w))
<|reserved_special_token_1|>
x = 5
y = x
print(id(x))
print(id(y))
print()
y = 3
print(id(x))
print(id(y))
print()
z = [1, 4, 3, 25]
w = z
print(z)
print(w)
print(id(z))
print(id(w))
print()
w[1] = 10
print(z)
print(w)
print(id(z))
print(id(w))
<|reserved_special_token_1|>
x = 5
y = x
print(id(x))
print(id(y))
print()
y = 3
print(id(x))
print(id(y))
print()
z = [1, 4, 3, 25]
w = z
print(z)
print(w)
print(id(z))
print(id(w))
print()
w[1] = 10
print(z)
print(w)
print(id(z))
print(id(w))
# So when you assign a mutable, you're actually assigning a reference to the mutable,
# and I have the side effect that when I change an element of that list in one place,
# it gets changed in both places because it's really just one object, and functions work exactly the same way.
|
flexible
|
{
"blob_id": "956adc5961188458393b56564649ad0a3a787669",
"index": 7327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(id(x))\nprint(id(y))\nprint()\n<mask token>\nprint(id(x))\nprint(id(y))\nprint()\n<mask token>\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\nprint()\n<mask token>\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n",
"step-3": "x = 5\ny = x\nprint(id(x))\nprint(id(y))\nprint()\ny = 3\nprint(id(x))\nprint(id(y))\nprint()\nz = [1, 4, 3, 25]\nw = z\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\nprint()\nw[1] = 10\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n",
"step-4": "x = 5\ny = x\n\nprint(id(x))\nprint(id(y))\n\nprint()\n\ny = 3\n\nprint(id(x))\nprint(id(y))\n\nprint()\n\nz = [1, 4, 3, 25]\nw = z\n\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n\nprint()\n\nw[1] = 10\n\nprint(z)\nprint(w)\nprint(id(z))\nprint(id(w))\n\n# So when you assign a mutable, you're actually assigning a reference to the mutable,\n# and I have the side effect that when I change an element of that list in one place,\n# it gets changed in both places because it's really just one object, and functions work exactly the same way.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import smtplib
from email.mime.text import MIMEText
from email.utils import formatdate
from ... import config
def create_message(from_addr, to_addr, subject, message, encoding):
body = MIMEText(message, 'plain', encoding)
body['Subject'] = subject
body['From'] = from_addr
body['To'] = to_addr
body['Date'] = formatdate()
return body
def send_via_gmail(from_addr, to_addr, body):
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.ehlo()
s.login( config['gmail']['user'], config['gmail']['password'])
s.sendmail(from_addr, [to_addr], body.as_string())
s.close()
def gmail(message, to_addr):
body = create_message(
config['gmail']['user'], to_addr, '[Notification]', message, 'utf8')
send_via_gmail(config['gmail']['user'], to_addr, body)
return
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if (argc < 3):
print('USAGE: python gmail.py address message')
raise SystemExit(0)
else:
to_addr = argvs[1]
message = argvs[2]
gmail(message, to_addr)
|
normal
|
{
"blob_id": "237724db5130926123a3a31be7070947ec7b01f3",
"index": 3492,
"step-1": "<mask token>\n\n\ndef create_message(from_addr, to_addr, subject, message, encoding):\n body = MIMEText(message, 'plain', encoding)\n body['Subject'] = subject\n body['From'] = from_addr\n body['To'] = to_addr\n body['Date'] = formatdate()\n return body\n\n\ndef send_via_gmail(from_addr, to_addr, body):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(config['gmail']['user'], config['gmail']['password'])\n s.sendmail(from_addr, [to_addr], body.as_string())\n s.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_message(from_addr, to_addr, subject, message, encoding):\n body = MIMEText(message, 'plain', encoding)\n body['Subject'] = subject\n body['From'] = from_addr\n body['To'] = to_addr\n body['Date'] = formatdate()\n return body\n\n\ndef send_via_gmail(from_addr, to_addr, body):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(config['gmail']['user'], config['gmail']['password'])\n s.sendmail(from_addr, [to_addr], body.as_string())\n s.close()\n\n\ndef gmail(message, to_addr):\n body = create_message(config['gmail']['user'], to_addr,\n '[Notification]', message, 'utf8')\n send_via_gmail(config['gmail']['user'], to_addr, body)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_message(from_addr, to_addr, subject, message, encoding):\n body = MIMEText(message, 'plain', encoding)\n body['Subject'] = subject\n body['From'] = from_addr\n body['To'] = to_addr\n body['Date'] = formatdate()\n return body\n\n\ndef send_via_gmail(from_addr, to_addr, body):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(config['gmail']['user'], config['gmail']['password'])\n s.sendmail(from_addr, [to_addr], body.as_string())\n s.close()\n\n\ndef gmail(message, to_addr):\n body = create_message(config['gmail']['user'], to_addr,\n '[Notification]', message, 'utf8')\n send_via_gmail(config['gmail']['user'], to_addr, body)\n return\n\n\nif __name__ == '__main__':\n argvs = sys.argv\n argc = len(argvs)\n if argc < 3:\n print('USAGE: python gmail.py address message')\n raise SystemExit(0)\n else:\n to_addr = argvs[1]\n message = argvs[2]\n gmail(message, to_addr)\n",
"step-4": "import sys\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom ... import config\n\n\ndef create_message(from_addr, to_addr, subject, message, encoding):\n body = MIMEText(message, 'plain', encoding)\n body['Subject'] = subject\n body['From'] = from_addr\n body['To'] = to_addr\n body['Date'] = formatdate()\n return body\n\n\ndef send_via_gmail(from_addr, to_addr, body):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(config['gmail']['user'], config['gmail']['password'])\n s.sendmail(from_addr, [to_addr], body.as_string())\n s.close()\n\n\ndef gmail(message, to_addr):\n body = create_message(config['gmail']['user'], to_addr,\n '[Notification]', message, 'utf8')\n send_via_gmail(config['gmail']['user'], to_addr, body)\n return\n\n\nif __name__ == '__main__':\n argvs = sys.argv\n argc = len(argvs)\n if argc < 3:\n print('USAGE: python gmail.py address message')\n raise SystemExit(0)\n else:\n to_addr = argvs[1]\n message = argvs[2]\n gmail(message, to_addr)\n",
"step-5": "import sys\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.utils import formatdate\nfrom ... import config\n\ndef create_message(from_addr, to_addr, subject, message, encoding):\n body = MIMEText(message, 'plain', encoding)\n body['Subject'] = subject\n body['From'] = from_addr\n body['To'] = to_addr\n body['Date'] = formatdate()\n return body\n\n\ndef send_via_gmail(from_addr, to_addr, body):\n s = smtplib.SMTP('smtp.gmail.com', 587)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login( config['gmail']['user'], config['gmail']['password'])\n s.sendmail(from_addr, [to_addr], body.as_string())\n s.close()\n\n\ndef gmail(message, to_addr):\n body = create_message(\n config['gmail']['user'], to_addr, '[Notification]', message, 'utf8')\n send_via_gmail(config['gmail']['user'], to_addr, body)\n return\n\n\nif __name__ == '__main__':\n argvs = sys.argv\n argc = len(argvs)\n\n if (argc < 3):\n print('USAGE: python gmail.py address message')\n raise SystemExit(0)\n else:\n to_addr = argvs[1]\n message = argvs[2]\n\n gmail(message, to_addr)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# KeyLogger.py
# show a character key when pressed without using Enter key
# hide the Tkinter GUI window, only console shows
import Tkinter as tk
def key(event):
if event.keysym == 'Escape':
root.destroy()
print event.char, event.keysym
root = tk.Tk()
print "Press a key (Escape key to exit):"
root.bind_all('<Key>', key)
# don't show the tk window
root.withdraw()
root.mainloop()
|
normal
|
{
"blob_id": "368151a134f987ed78c8048521137672530b5cce",
"index": 1022,
"step-1": "# KeyLogger.py\n# show a character key when pressed without using Enter key\n# hide the Tkinter GUI window, only console shows\n\nimport Tkinter as tk\n\ndef key(event):\n if event.keysym == 'Escape':\n root.destroy()\n print event.char, event.keysym\n\nroot = tk.Tk()\nprint \"Press a key (Escape key to exit):\"\nroot.bind_all('<Key>', key)\n# don't show the tk window\nroot.withdraw()\nroot.mainloop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class DevConfig(Config):
<|reserved_special_token_0|>
ENV = 'dev'
DEBUG = True
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Config(object):
<|reserved_special_token_0|>
SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')
APP_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
ASSIST_ACTIONS_ON_GOOGLE = True
CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'
DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')
APP_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
ASSIST_ACTIONS_ON_GOOGLE = True
CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'
DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Application configuration.
See https://github.com/sloria/cookiecutter-flask for configuration options with other flask-extensions
"""
import os
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
# Flask-Assistant Integrations
ASSIST_ACTIONS_ON_GOOGLE = True
CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'
DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
|
flexible
|
{
"blob_id": "4cc1c8668a84cc6faadf60053568d155b8852c5f",
"index": 5643,
"step-1": "<mask token>\n\n\nclass DevConfig(Config):\n <mask token>\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-2": "<mask token>\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-3": "<mask token>\n\n\nclass Config(object):\n <mask token>\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')\n APP_DIR = os.path.abspath(os.path.dirname(__file__))\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-4": "<mask token>\nimport os\n\n\nclass Config(object):\n \"\"\"Base configuration.\"\"\"\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key')\n APP_DIR = os.path.abspath(os.path.dirname(__file__))\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n ENV = 'dev'\n DEBUG = True\n\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n TESTING = True\n DEBUG = True\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Application configuration.\n\nSee https://github.com/sloria/cookiecutter-flask for configuration options with other flask-extensions\n\"\"\"\nimport os\n\n\nclass Config(object):\n \"\"\"Base configuration.\"\"\"\n\n SECRET_KEY = os.environ.get('DELIVERY_ASSISTANT_SECRET', 'secret-key') # TODO: Change me\n APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory\n PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))\n\n # Flask-Assistant Integrations\n ASSIST_ACTIONS_ON_GOOGLE = True\n CLIENT_ACCESS_TOKEN = 'YOUR API.AI AGENT CLIENT ACCESS TOKEN'\n DEV_ACCESS_TOKEN = 'YOUR API.AI AGENT DEVELOPER ACCESS TOKEN'\n\nclass ProdConfig(Config):\n \"\"\"Production configuration.\"\"\"\n\n ENV = 'prod'\n DEBUG = False\n\n\nclass DevConfig(Config):\n \"\"\"Development configuration.\"\"\"\n\n ENV = 'dev'\n DEBUG = True\n\nclass TestConfig(Config):\n \"\"\"Test configuration.\"\"\"\n\n TESTING = True\n DEBUG = True\n",
"step-ids": [
5,
9,
11,
13,
14
]
}
|
[
5,
9,
11,
13,
14
] |
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger('ucs')
def firmware_pack_create(handle, org_name, name, rack_bundle_version,
blade_bundle_version, descr="", mode="staged",
org_parent="org-root"):
"""
This method creates Host Firmware pack.
Args:
handle (UcsHandle)
org_name (string): Name of the organization
name (string): Name of the firmware pack.
rack_bundle_version (string): Rack bundle version
blade_bundle_version (string): Blade bundle version
mode (string): "one-sot" or "staged"
descr (string): Basic description.
org_parent (string): Parent of Org
Returns:
None
Example:
firmware_pack_create(handle, org_name="sample_org",
name="sample_fp",
rack_bundle_version="",
blade_bundle_version="")
"""
org_dn = org_parent + "/org-" + org_name
p_mo = handle.query_dn(org_dn)
if not p_mo:
log.info("Sub-Org <%s> not found!" % org_name)
else:
from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\
FirmwareComputeHostPack
mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,
name=name,
descr=descr,
rack_bundle_version=rack_bundle_version,
mode=mode,
blade_bundle_version=blade_bundle_version)
handle.add_mo(mo)
handle.commit()
def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,
blade_bundle_version=None, descr=None, mode=None,
org_parent="org-root"):
"""
This method creates Host Firmware pack.
Args:
handle (UcsHandle)
org_name (string): Name of the organization
name (string): Name of the firmware pack.
rack_bundle_version (string): Rack bundle version
blade_bundle_version (string): Blade bundle version
mode (string): "one-sot" or "staged"
descr (string): Basic description.
org_parent (string): Parent of Org
Returns:
None
Example:
firmware_pack_modify(handle, org_name="sample_org",
name="sample_fp",
rack_bundle_version="",
blade_bundle_version="")
"""
org_dn = org_parent + "/org-" + org_name
fw_dn= org_dn + "/fw-host-pack-" + name
mo = handle.query_dn(fw_dn)
if mo is not None:
if rack_bundle_version is not None:
mo.rack_bundle_version = rack_bundle_version
if blade_bundle_version is not None:
mo.blade_bundle_version = blade_bundle_version
if mode is not None:
mo.mode=mode
if descr is not None:
mo.descr = descr
handle.set_mo(mo)
handle.commit()
else:
log.info("Firmware host pack <%s> not found." % name)
def firmware_pack_remove(handle, org_name, name, org_parent="org-root"):
"""
This method removes Host Firmware pack.
Args:
handle (UcsHandle)
org_name (string): Name of the organization
name (string): Name of the firmware pack.
org_parent (string): Parent of Org.
Returns:
None
Example:
firmware_pack_remove(handle, org_name="sample_org",
name="sample_fp")
"""
org_dn = org_parent + "/org-" + org_name
p_mo = handle.query_dn(org_dn)
if not p_mo:
log.info("Sub-Org <%s> not found!" %org_name)
else:
fw_dn= org_dn + "/fw-host-pack-" + name
mo = handle.query_dn(fw_dn)
if not mo:
log.info("Firmware host pack <%s> not found.Nothing to remove" % name)
else:
handle.remove_mo(mo)
handle.commit()
|
normal
|
{
"blob_id": "21cfe1ca606d18763fbfb8ff6862c382b3321adc",
"index": 8511,
"step-1": "<mask token>\n\n\ndef firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr='', mode='staged', org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_create(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import FirmwareComputeHostPack\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn, name=name,\n descr=descr, rack_bundle_version=rack_bundle_version, mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()\n\n\ndef firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None, org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_modify(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n fw_dn = org_dn + 
'/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode = mode\n if descr is not None:\n mo.descr = descr\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info('Firmware host pack <%s> not found.' % name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr='', mode='staged', org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_create(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import FirmwareComputeHostPack\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn, name=name,\n descr=descr, rack_bundle_version=rack_bundle_version, mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()\n\n\ndef firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None, org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_modify(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n fw_dn = org_dn + 
'/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode = mode\n if descr is not None:\n mo.descr = descr\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info('Firmware host pack <%s> not found.' % name)\n\n\ndef firmware_pack_remove(handle, org_name, name, org_parent='org-root'):\n \"\"\"\n This method removes Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n org_parent (string): Parent of Org.\n\n Returns:\n None\n\n Example:\n firmware_pack_remove(handle, org_name=\"sample_org\",\n name=\"sample_fp\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n fw_dn = org_dn + '/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info('Firmware host pack <%s> not found.Nothing to remove' %\n name)\n else:\n handle.remove_mo(mo)\n handle.commit()\n",
"step-3": "<mask token>\nlog = logging.getLogger('ucs')\n\n\ndef firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr='', mode='staged', org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_create(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import FirmwareComputeHostPack\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn, name=name,\n descr=descr, rack_bundle_version=rack_bundle_version, mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()\n\n\ndef firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None, org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_modify(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n 
fw_dn = org_dn + '/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode = mode\n if descr is not None:\n mo.descr = descr\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info('Firmware host pack <%s> not found.' % name)\n\n\ndef firmware_pack_remove(handle, org_name, name, org_parent='org-root'):\n \"\"\"\n This method removes Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n org_parent (string): Parent of Org.\n\n Returns:\n None\n\n Example:\n firmware_pack_remove(handle, org_name=\"sample_org\",\n name=\"sample_fp\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n fw_dn = org_dn + '/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info('Firmware host pack <%s> not found.Nothing to remove' %\n name)\n else:\n handle.remove_mo(mo)\n handle.commit()\n",
"step-4": "import logging\nlog = logging.getLogger('ucs')\n\n\ndef firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr='', mode='staged', org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_create(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import FirmwareComputeHostPack\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn, name=name,\n descr=descr, rack_bundle_version=rack_bundle_version, mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()\n\n\ndef firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None, org_parent='org-root'):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_modify(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n org_dn = org_parent + '/org-' + 
org_name\n fw_dn = org_dn + '/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode = mode\n if descr is not None:\n mo.descr = descr\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info('Firmware host pack <%s> not found.' % name)\n\n\ndef firmware_pack_remove(handle, org_name, name, org_parent='org-root'):\n \"\"\"\n This method removes Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n org_parent (string): Parent of Org.\n\n Returns:\n None\n\n Example:\n firmware_pack_remove(handle, org_name=\"sample_org\",\n name=\"sample_fp\")\n \"\"\"\n org_dn = org_parent + '/org-' + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info('Sub-Org <%s> not found!' % org_name)\n else:\n fw_dn = org_dn + '/fw-host-pack-' + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info('Firmware host pack <%s> not found.Nothing to remove' %\n name)\n else:\n handle.remove_mo(mo)\n handle.commit()\n",
"step-5": "# Copyright 2015 Cisco Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nlog = logging.getLogger('ucs')\n\n\ndef firmware_pack_create(handle, org_name, name, rack_bundle_version,\n blade_bundle_version, descr=\"\", mode=\"staged\",\n org_parent=\"org-root\"):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_create(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" % org_name)\n else:\n from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import\\\n FirmwareComputeHostPack\n\n mo = FirmwareComputeHostPack(parent_mo_or_dn=org_dn,\n name=name,\n descr=descr,\n rack_bundle_version=rack_bundle_version,\n mode=mode,\n blade_bundle_version=blade_bundle_version)\n handle.add_mo(mo)\n handle.commit()\n\n\ndef firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n 
org_parent=\"org-root\"):\n \"\"\"\n This method creates Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n rack_bundle_version (string): Rack bundle version\n blade_bundle_version (string): Blade bundle version\n mode (string): \"one-sot\" or \"staged\"\n descr (string): Basic description.\n org_parent (string): Parent of Org\n\n Returns:\n None\n\n Example:\n firmware_pack_modify(handle, org_name=\"sample_org\",\n name=\"sample_fp\",\n rack_bundle_version=\"\",\n blade_bundle_version=\"\")\n \"\"\"\n\n org_dn = org_parent + \"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)\n\n\ndef firmware_pack_remove(handle, org_name, name, org_parent=\"org-root\"):\n\n \"\"\"\n This method removes Host Firmware pack.\n\n Args:\n handle (UcsHandle)\n org_name (string): Name of the organization\n name (string): Name of the firmware pack.\n org_parent (string): Parent of Org.\n\n Returns:\n None\n\n Example:\n firmware_pack_remove(handle, org_name=\"sample_org\",\n name=\"sample_fp\")\n \"\"\"\n org_dn = org_parent + \"/org-\" + org_name\n p_mo = handle.query_dn(org_dn)\n if not p_mo:\n log.info(\"Sub-Org <%s> not found!\" %org_name)\n else:\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if not mo:\n log.info(\"Firmware host pack <%s> not found.Nothing to remove\" % name)\n else:\n handle.remove_mo(mo)\n handle.commit()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas
def ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):
input_features, output_features = [], []
for p in features:
if (pandas.api.types.is_numeric_dtype(df[p])):
input_features.append({'name': p, 'type': 'numerical',
'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})
elif (pandas.api.types.is_string_dtype(df[p])):
input_features.append({'name': p, 'type': 'category'})
else:
raise TypeError(f'column {p} value isnt number or string')
if (pandas.api.types.is_numeric_dtype(df[target])):
output_features.append({'name': target, 'type': 'numerical',
'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})
elif (pandas.api.types.is_string_dtype(df[p])):
output_features.append({'name': target, 'type': 'category'})
else:
raise TypeError(f'column {target} value isnt number or string')
return {
'input_features' : input_features,
'output_features': output_features,
}
|
normal
|
{
"blob_id": "b7521a604fb49591df814d469f53d35574126fdb",
"index": 7609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if pandas.api.types.is_numeric_dtype(df[p]):\n input_features.append({'name': p, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy':\n 'fill_with_mean', 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n if pandas.api.types.is_numeric_dtype(df[target]):\n output_features.append({'name': target, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy': 'fill_with_mean',\n 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n return {'input_features': input_features, 'output_features':\n output_features}\n",
"step-3": "import pandas\n\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if pandas.api.types.is_numeric_dtype(df[p]):\n input_features.append({'name': p, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy':\n 'fill_with_mean', 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n if pandas.api.types.is_numeric_dtype(df[target]):\n output_features.append({'name': target, 'type': 'numerical',\n 'preprocessing': {'missing_value_strategy': 'fill_with_mean',\n 'normalization': 'zscore'}})\n elif pandas.api.types.is_string_dtype(df[p]):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n return {'input_features': input_features, 'output_features':\n output_features}\n",
"step-4": "import pandas\n\ndef ludwig_get_model_definition(df: 'Dataframe', target: str, features: list):\n input_features, output_features = [], []\n for p in features:\n if (pandas.api.types.is_numeric_dtype(df[p])):\n input_features.append({'name': p, 'type': 'numerical', \n 'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})\n elif (pandas.api.types.is_string_dtype(df[p])):\n input_features.append({'name': p, 'type': 'category'})\n else:\n raise TypeError(f'column {p} value isnt number or string')\n \n if (pandas.api.types.is_numeric_dtype(df[target])):\n output_features.append({'name': target, 'type': 'numerical', \n 'preprocessing': {'missing_value_strategy': 'fill_with_mean', 'normalization': 'zscore'}})\n elif (pandas.api.types.is_string_dtype(df[p])):\n output_features.append({'name': target, 'type': 'category'})\n else:\n raise TypeError(f'column {target} value isnt number or string')\n \n return {\n 'input_features' : input_features,\n 'output_features': output_features,\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
a, b, c = map(int, input().split())
frag = 'NO'
for i in range(b - 1):
if (i + 1) * a % b == c:
frag = 'YES'
break
print(frag)
|
normal
|
{
"blob_id": "6ad36f2b115c822a50a38e88a8d7d524fc5b045b",
"index": 195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(b - 1):\n if (i + 1) * a % b == c:\n frag = 'YES'\n break\nprint(frag)\n",
"step-3": "a, b, c = map(int, input().split())\nfrag = 'NO'\nfor i in range(b - 1):\n if (i + 1) * a % b == c:\n frag = 'YES'\n break\nprint(frag)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class RuyConan(ConanFile):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def configure(self):
if self.options.shared:
self.options.rm_safe('fPIC')
def requirements(self):
self.requires('cpuinfo/cci.20220228')
<|reserved_special_token_0|>
def source(self):
get(self, **self.conan_data['sources'][self.version], strip_root=True)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RuyConan(ConanFile):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def _minimum_compilers_version(self):
return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':
'3.4', 'apple-clang': '5.1'}
def validate(self):
if self.settings.compiler.get_safe('cppstd'):
check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.
settings.compiler), False)
if not minimum_version:
self.output.warning(
'Compiler is unknown. Assuming it supports C++14.')
elif Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
'Build requires support for C++14. Minimum version for {} is {}'
.format(str(self.settings.compiler), minimum_version))
if str(self.settings.compiler) == 'clang' and Version(self.settings
.compiler.version) <= 5 and self.settings.build_type == 'Debug':
raise ConanInvalidConfiguration(
'Debug builds are not supported on older versions of Clang (<=5)'
)
def config_options(self):
if self.settings.os == 'Windows':
self.options.rm_safe('fPIC')
def configure(self):
if self.options.shared:
self.options.rm_safe('fPIC')
def requirements(self):
self.requires('cpuinfo/cci.20220228')
<|reserved_special_token_0|>
def source(self):
get(self, **self.conan_data['sources'][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables['RUY_MINIMAL_BUILD'] = True
tc.cache_variables['RUY_FIND_CPUINFO'] = True
tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')
patches = {'cmake_minimum_required(VERSION 3.13)': '',
'# Copyright 2021 Google LLC':
"""# Copyright 2021 Google LLC
cmake_minimum_required(VERSION 3.13)"""
}
for pattern, patch in patches.items():
replace_in_file(self, cmakelists, pattern, patch)
replace_in_file(self, os.path.join(self.source_folder, 'cmake',
'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',
'add_library(${_NAME}')
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
copy(self, 'LICENSE', dst=os.path.join(self.package_folder,
'licenses'), src=self.source_folder)
rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))
def package_info(self):
self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',
'ruy_thread_pool', 'ruy_blocking_counter',
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',
'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',
'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',
'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',
'ruy_have_built_path_for_avx2_fma',
'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',
'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',
'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',
'ruy_pack_avx512', 'ruy_system_aligned_alloc',
'ruy_profiler_instrumentation', 'ruy_profiler_profiler']
if self.settings.os in ['Linux', 'FreeBSD']:
self.cpp_info.system_libs.extend(['m', 'pthread'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RuyConan(ConanFile):
name = 'ruy'
description = """ruy is a matrix multiplication library.
Its focus is to cover the matrix multiplication needs of neural network inference engines
"""
url = 'https://github.com/conan-io/conan-center-index'
homepage = 'https://github.com/google/ruy'
license = 'Apache-2.0'
topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',
'tensorflow')
settings = 'os', 'arch', 'compiler', 'build_type'
options = {'shared': [True, False], 'fPIC': [True, False]}
default_options = {'shared': False, 'fPIC': True}
@property
def _minimum_compilers_version(self):
return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':
'3.4', 'apple-clang': '5.1'}
def validate(self):
if self.settings.compiler.get_safe('cppstd'):
check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.
settings.compiler), False)
if not minimum_version:
self.output.warning(
'Compiler is unknown. Assuming it supports C++14.')
elif Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
'Build requires support for C++14. Minimum version for {} is {}'
.format(str(self.settings.compiler), minimum_version))
if str(self.settings.compiler) == 'clang' and Version(self.settings
.compiler.version) <= 5 and self.settings.build_type == 'Debug':
raise ConanInvalidConfiguration(
'Debug builds are not supported on older versions of Clang (<=5)'
)
def config_options(self):
if self.settings.os == 'Windows':
self.options.rm_safe('fPIC')
def configure(self):
if self.options.shared:
self.options.rm_safe('fPIC')
def requirements(self):
self.requires('cpuinfo/cci.20220228')
def layout(self):
cmake_layout(self, src_folder='src')
def source(self):
get(self, **self.conan_data['sources'][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables['RUY_MINIMAL_BUILD'] = True
tc.cache_variables['RUY_FIND_CPUINFO'] = True
tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')
patches = {'cmake_minimum_required(VERSION 3.13)': '',
'# Copyright 2021 Google LLC':
"""# Copyright 2021 Google LLC
cmake_minimum_required(VERSION 3.13)"""
}
for pattern, patch in patches.items():
replace_in_file(self, cmakelists, pattern, patch)
replace_in_file(self, os.path.join(self.source_folder, 'cmake',
'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',
'add_library(${_NAME}')
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
copy(self, 'LICENSE', dst=os.path.join(self.package_folder,
'licenses'), src=self.source_folder)
rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))
def package_info(self):
self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',
'ruy_thread_pool', 'ruy_blocking_counter',
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',
'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',
'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',
'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',
'ruy_have_built_path_for_avx2_fma',
'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',
'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',
'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',
'ruy_pack_avx512', 'ruy_system_aligned_alloc',
'ruy_profiler_instrumentation', 'ruy_profiler_profiler']
if self.settings.os in ['Linux', 'FreeBSD']:
self.cpp_info.system_libs.extend(['m', 'pthread'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
required_conan_version = '>=1.57.0'
class RuyConan(ConanFile):
name = 'ruy'
description = """ruy is a matrix multiplication library.
Its focus is to cover the matrix multiplication needs of neural network inference engines
"""
url = 'https://github.com/conan-io/conan-center-index'
homepage = 'https://github.com/google/ruy'
license = 'Apache-2.0'
topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',
'tensorflow')
settings = 'os', 'arch', 'compiler', 'build_type'
options = {'shared': [True, False], 'fPIC': [True, False]}
default_options = {'shared': False, 'fPIC': True}
@property
def _minimum_compilers_version(self):
return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':
'3.4', 'apple-clang': '5.1'}
def validate(self):
if self.settings.compiler.get_safe('cppstd'):
check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.
settings.compiler), False)
if not minimum_version:
self.output.warning(
'Compiler is unknown. Assuming it supports C++14.')
elif Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
'Build requires support for C++14. Minimum version for {} is {}'
.format(str(self.settings.compiler), minimum_version))
if str(self.settings.compiler) == 'clang' and Version(self.settings
.compiler.version) <= 5 and self.settings.build_type == 'Debug':
raise ConanInvalidConfiguration(
'Debug builds are not supported on older versions of Clang (<=5)'
)
def config_options(self):
if self.settings.os == 'Windows':
self.options.rm_safe('fPIC')
def configure(self):
if self.options.shared:
self.options.rm_safe('fPIC')
def requirements(self):
self.requires('cpuinfo/cci.20220228')
def layout(self):
cmake_layout(self, src_folder='src')
def source(self):
get(self, **self.conan_data['sources'][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables['RUY_MINIMAL_BUILD'] = True
tc.cache_variables['RUY_FIND_CPUINFO'] = True
tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')
patches = {'cmake_minimum_required(VERSION 3.13)': '',
'# Copyright 2021 Google LLC':
"""# Copyright 2021 Google LLC
cmake_minimum_required(VERSION 3.13)"""
}
for pattern, patch in patches.items():
replace_in_file(self, cmakelists, pattern, patch)
replace_in_file(self, os.path.join(self.source_folder, 'cmake',
'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',
'add_library(${_NAME}')
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
copy(self, 'LICENSE', dst=os.path.join(self.package_folder,
'licenses'), src=self.source_folder)
rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))
def package_info(self):
self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',
'ruy_thread_pool', 'ruy_blocking_counter',
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',
'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',
'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',
'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',
'ruy_have_built_path_for_avx2_fma',
'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',
'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',
'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',
'ruy_pack_avx512', 'ruy_system_aligned_alloc',
'ruy_profiler_instrumentation', 'ruy_profiler_profiler']
if self.settings.os in ['Linux', 'FreeBSD']:
self.cpp_info.system_libs.extend(['m', 'pthread'])
<|reserved_special_token_1|>
import os
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import copy, get, replace_in_file, rmdir
from conan.tools.scm import Version
from conan.errors import ConanInvalidConfiguration
required_conan_version = ">=1.57.0"
class RuyConan(ConanFile):
name = "ruy"
description = "ruy is a matrix multiplication library.\n" \
"Its focus is to cover the matrix multiplication needs of neural network inference engines\n"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/google/ruy"
license = "Apache-2.0"
topics = ("matrix", "multiplication", "neural", "network", "AI", "tensorflow")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "15",
"msvc": "191",
"gcc": "5",
"clang": "3.4",
"apple-clang": "5.1",
}
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, 14)
minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)
if not minimum_version:
self.output.warning("Compiler is unknown. Assuming it supports C++14.")
elif Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration("Build requires support for C++14. Minimum version for {} is {}"
.format(str(self.settings.compiler), minimum_version))
if str(self.settings.compiler) == "clang" and Version(self.settings.compiler.version) <= 5 and self.settings.build_type == "Debug":
raise ConanInvalidConfiguration("Debug builds are not supported on older versions of Clang (<=5)")
def config_options(self):
if self.settings.os == "Windows":
self.options.rm_safe("fPIC")
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def requirements(self):
self.requires("cpuinfo/cci.20220228")
def layout(self):
cmake_layout(self, src_folder="src")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.cache_variables["RUY_MINIMAL_BUILD"] = True
tc.cache_variables["RUY_FIND_CPUINFO"] = True
# Ruy public headers don't have API decorators,
# export everything to support shared libraries on Windows
tc.variables["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def _patch_sources(self):
cmakelists = os.path.join(self.source_folder, "CMakeLists.txt")
patches = {
#Remove the invocation after project(), see https://github.com/google/ruy/issues/328
"cmake_minimum_required(VERSION 3.13)": "",
# Ensure `cmake_minimum_required` is called first
"# Copyright 2021 Google LLC": "# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)",
}
for pattern, patch in patches.items():
replace_in_file(self, cmakelists, pattern, patch)
# 1. Allow Shared builds
replace_in_file(self, os.path.join(self.source_folder, "cmake", "ruy_cc_library.cmake"),
"add_library(${_NAME} STATIC",
"add_library(${_NAME}"
)
def build(self):
self._patch_sources()
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
copy(self, "LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.libs = ["ruy_frontend",
"ruy_context",
"ruy_trmul",
"ruy_thread_pool",
"ruy_blocking_counter",
"ruy_prepare_packed_matrices",
"ruy_ctx",
"ruy_allocator",
"ruy_prepacked_cache",
"ruy_tune",
"ruy_wait",
"ruy_apply_multiplier",
"ruy_block_map",
"ruy_context_get_ctx",
"ruy_cpuinfo",
"ruy_denormal",
"ruy_have_built_path_for_avx",
"ruy_have_built_path_for_avx2_fma",
"ruy_have_built_path_for_avx512",
"ruy_kernel_arm",
"ruy_kernel_avx",
"ruy_kernel_avx2_fma",
"ruy_kernel_avx512",
"ruy_pack_arm",
"ruy_pack_avx",
"ruy_pack_avx2_fma",
"ruy_pack_avx512",
"ruy_system_aligned_alloc",
"ruy_profiler_instrumentation",
"ruy_profiler_profiler"
]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.extend(["m", "pthread"])
|
flexible
|
{
"blob_id": "fe1c499efe492dbd4f5c9b99bd6339c503c7902b",
"index": 5766,
"step-1": "<mask token>\n\n\nclass RuyConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n <mask token>\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RuyConan(ConanFile):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n <mask token>\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 
3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n",
"step-3": "<mask token>\n\n\nclass RuyConan(ConanFile):\n name = 'ruy'\n description = \"\"\"ruy is a matrix multiplication library.\nIts focus is to cover the matrix multiplication needs of neural network inference engines\n\"\"\"\n url = 'https://github.com/conan-io/conan-center-index'\n homepage = 'https://github.com/google/ruy'\n license = 'Apache-2.0'\n topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',\n 'tensorflow')\n settings = 'os', 'arch', 'compiler', 'build_type'\n options = {'shared': [True, False], 'fPIC': [True, False]}\n default_options = {'shared': False, 'fPIC': True}\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. 
Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n\n def layout(self):\n cmake_layout(self, src_folder='src')\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n",
"step-4": "<mask token>\nrequired_conan_version = '>=1.57.0'\n\n\nclass RuyConan(ConanFile):\n name = 'ruy'\n description = \"\"\"ruy is a matrix multiplication library.\nIts focus is to cover the matrix multiplication needs of neural network inference engines\n\"\"\"\n url = 'https://github.com/conan-io/conan-center-index'\n homepage = 'https://github.com/google/ruy'\n license = 'Apache-2.0'\n topics = ('matrix', 'multiplication', 'neural', 'network', 'AI',\n 'tensorflow')\n settings = 'os', 'arch', 'compiler', 'build_type'\n options = {'shared': [True, False], 'fPIC': [True, False]}\n default_options = {'shared': False, 'fPIC': True}\n\n @property\n def _minimum_compilers_version(self):\n return {'Visual Studio': '15', 'msvc': '191', 'gcc': '5', 'clang':\n '3.4', 'apple-clang': '5.1'}\n\n def validate(self):\n if self.settings.compiler.get_safe('cppstd'):\n check_min_cppstd(self, 14)\n minimum_version = self._minimum_compilers_version.get(str(self.\n settings.compiler), False)\n if not minimum_version:\n self.output.warning(\n 'Compiler is unknown. Assuming it supports C++14.')\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\n 'Build requires support for C++14. 
Minimum version for {} is {}'\n .format(str(self.settings.compiler), minimum_version))\n if str(self.settings.compiler) == 'clang' and Version(self.settings\n .compiler.version) <= 5 and self.settings.build_type == 'Debug':\n raise ConanInvalidConfiguration(\n 'Debug builds are not supported on older versions of Clang (<=5)'\n )\n\n def config_options(self):\n if self.settings.os == 'Windows':\n self.options.rm_safe('fPIC')\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe('fPIC')\n\n def requirements(self):\n self.requires('cpuinfo/cci.20220228')\n\n def layout(self):\n cmake_layout(self, src_folder='src')\n\n def source(self):\n get(self, **self.conan_data['sources'][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables['RUY_MINIMAL_BUILD'] = True\n tc.cache_variables['RUY_FIND_CPUINFO'] = True\n tc.variables['CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS'] = True\n tc.generate()\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, 'CMakeLists.txt')\n patches = {'cmake_minimum_required(VERSION 3.13)': '',\n '# Copyright 2021 Google LLC':\n \"\"\"# Copyright 2021 Google LLC\ncmake_minimum_required(VERSION 3.13)\"\"\"\n }\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n replace_in_file(self, os.path.join(self.source_folder, 'cmake',\n 'ruy_cc_library.cmake'), 'add_library(${_NAME} STATIC',\n 'add_library(${_NAME}')\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, 'LICENSE', dst=os.path.join(self.package_folder,\n 'licenses'), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, 'lib', 'cmake'))\n\n def package_info(self):\n self.cpp_info.libs = ['ruy_frontend', 'ruy_context', 'ruy_trmul',\n 'ruy_thread_pool', 'ruy_blocking_counter',\n 
'ruy_prepare_packed_matrices', 'ruy_ctx', 'ruy_allocator',\n 'ruy_prepacked_cache', 'ruy_tune', 'ruy_wait',\n 'ruy_apply_multiplier', 'ruy_block_map', 'ruy_context_get_ctx',\n 'ruy_cpuinfo', 'ruy_denormal', 'ruy_have_built_path_for_avx',\n 'ruy_have_built_path_for_avx2_fma',\n 'ruy_have_built_path_for_avx512', 'ruy_kernel_arm',\n 'ruy_kernel_avx', 'ruy_kernel_avx2_fma', 'ruy_kernel_avx512',\n 'ruy_pack_arm', 'ruy_pack_avx', 'ruy_pack_avx2_fma',\n 'ruy_pack_avx512', 'ruy_system_aligned_alloc',\n 'ruy_profiler_instrumentation', 'ruy_profiler_profiler']\n if self.settings.os in ['Linux', 'FreeBSD']:\n self.cpp_info.system_libs.extend(['m', 'pthread'])\n",
"step-5": "import os\nfrom conan import ConanFile\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout\nfrom conan.tools.files import copy, get, replace_in_file, rmdir\nfrom conan.tools.scm import Version\nfrom conan.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.57.0\"\n\n\nclass RuyConan(ConanFile):\n name = \"ruy\"\n description = \"ruy is a matrix multiplication library.\\n\" \\\n \"Its focus is to cover the matrix multiplication needs of neural network inference engines\\n\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/google/ruy\"\n license = \"Apache-2.0\"\n topics = (\"matrix\", \"multiplication\", \"neural\", \"network\", \"AI\", \"tensorflow\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n @property\n def _minimum_compilers_version(self):\n return {\n \"Visual Studio\": \"15\",\n \"msvc\": \"191\", \n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"5.1\",\n }\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, 14)\n\n minimum_version = self._minimum_compilers_version.get(str(self.settings.compiler), False)\n if not minimum_version:\n self.output.warning(\"Compiler is unknown. Assuming it supports C++14.\")\n elif Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\"Build requires support for C++14. 
Minimum version for {} is {}\"\n .format(str(self.settings.compiler), minimum_version))\n\n if str(self.settings.compiler) == \"clang\" and Version(self.settings.compiler.version) <= 5 and self.settings.build_type == \"Debug\":\n raise ConanInvalidConfiguration(\"Debug builds are not supported on older versions of Clang (<=5)\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n self.options.rm_safe(\"fPIC\")\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n\n def requirements(self):\n self.requires(\"cpuinfo/cci.20220228\")\n\n def layout(self):\n cmake_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.cache_variables[\"RUY_MINIMAL_BUILD\"] = True\n tc.cache_variables[\"RUY_FIND_CPUINFO\"] = True\n # Ruy public headers don't have API decorators,\n # export everything to support shared libraries on Windows\n tc.variables[\"CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS\"] = True\n tc.generate()\n\n deps = CMakeDeps(self)\n deps.generate()\n\n def _patch_sources(self):\n cmakelists = os.path.join(self.source_folder, \"CMakeLists.txt\")\n patches = {\n #Remove the invocation after project(), see https://github.com/google/ruy/issues/328\n \"cmake_minimum_required(VERSION 3.13)\": \"\",\n # Ensure `cmake_minimum_required` is called first \n \"# Copyright 2021 Google LLC\": \"# Copyright 2021 Google LLC\\ncmake_minimum_required(VERSION 3.13)\", \n }\n\n for pattern, patch in patches.items():\n replace_in_file(self, cmakelists, pattern, patch)\n\n # 1. 
Allow Shared builds\n replace_in_file(self, os.path.join(self.source_folder, \"cmake\", \"ruy_cc_library.cmake\"),\n \"add_library(${_NAME} STATIC\",\n \"add_library(${_NAME}\"\n )\n\n def build(self):\n self._patch_sources()\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n cmake = CMake(self)\n cmake.install()\n copy(self, \"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n rmdir(self, os.path.join(self.package_folder, \"lib\", \"cmake\"))\n\n def package_info(self):\n self.cpp_info.libs = [\"ruy_frontend\",\n \"ruy_context\",\n \"ruy_trmul\",\n \"ruy_thread_pool\",\n \"ruy_blocking_counter\",\n \"ruy_prepare_packed_matrices\",\n \"ruy_ctx\",\n \"ruy_allocator\",\n \"ruy_prepacked_cache\",\n \"ruy_tune\",\n \"ruy_wait\",\n \"ruy_apply_multiplier\",\n \"ruy_block_map\",\n \"ruy_context_get_ctx\",\n \"ruy_cpuinfo\",\n \"ruy_denormal\",\n \"ruy_have_built_path_for_avx\",\n \"ruy_have_built_path_for_avx2_fma\",\n \"ruy_have_built_path_for_avx512\",\n \"ruy_kernel_arm\",\n \"ruy_kernel_avx\",\n \"ruy_kernel_avx2_fma\",\n \"ruy_kernel_avx512\",\n \"ruy_pack_arm\",\n \"ruy_pack_avx\",\n \"ruy_pack_avx2_fma\",\n \"ruy_pack_avx512\",\n \"ruy_system_aligned_alloc\",\n \"ruy_profiler_instrumentation\",\n \"ruy_profiler_profiler\"\n ]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n self.cpp_info.system_libs.extend([\"m\", \"pthread\"])\n",
"step-ids": [
4,
12,
14,
15,
17
]
}
|
[
4,
12,
14,
15,
17
] |
<|reserved_special_token_0|>
def heapify(array, size, ind):
largest = ind
left = 2 * ind + 1
right = 2 * ind + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'Исходный список: {mas}')
def heapify(array, size, ind):
largest = ind
left = 2 * ind + 1
right = 2 * ind + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
print(f'Медиана: {mas[len(mas) // 2]}')
<|reserved_special_token_0|>
print(statistics.median(mas))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
m = random.randint(5, 10)
mas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]
print(f'Исходный список: {mas}')
def heapify(array, size, ind):
largest = ind
left = 2 * ind + 1
right = 2 * ind + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
print(f'Медиана: {mas[len(mas) // 2]}')
<|reserved_special_token_0|>
print(statistics.median(mas))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import random
m = random.randint(5, 10)
mas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]
print(f'Исходный список: {mas}')
def heapify(array, size, ind):
largest = ind
left = 2 * ind + 1
right = 2 * ind + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
print(f'Медиана: {mas[len(mas) // 2]}')
import statistics
print(statistics.median(mas))
<|reserved_special_token_1|>
"""
Массив размером 2m + 1, где m — натуральное число, заполнен случайным образом. Найдите в массиве медиану.
Медианой называется элемент ряда, делящий его на две равные части:
в одной находятся элементы, которые не меньше медианы, в другой — не больше медианы.
Примечание: задачу можно решить без сортировки исходного массива.
Но если это слишком сложно, используйте метод сортировки, который не рассматривался на уроках
(сортировка слиянием также недопустима).
"""
"""В этой задаче как раз могла бы пригодиться быстрая сортировка Хоара или слиянием.
"Но без них не знаю, как можно написать более менее оптимизировано"""
import random
m = random.randint(5, 10)
# "одномерный вещественный массив, заданный случайными числами на промежутке [0; 50)" - т.е. [0; 49].
# Не знаю, важно ли это. uniform включает последнее число, в отличии от range и большинства прочих функций
# Для лучшей читабельности добавил округление
mas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]
print(f'Исходный список: {mas}')
# Через сортировку кучей
def heapify(array, size, ind):
largest = ind
left = (2 * ind) + 1
right = (2 * ind) + 2
if left < size and array[left] > array[largest]:
largest = left
if right < size and array[right] > array[largest]:
largest = right
if largest != ind:
array[ind], array[largest] = array[largest], array[ind]
heapify(array, size, largest)
def heap_sort(array):
n = len(array)
for i in range(n, -1, -1):
heapify(array, n, i)
for i in range(n - 1, 0, -1):
array[i], array[0] = array[0], array[i]
heapify(array, i, 0)
heap_sort(mas)
print(f'Отсортированный список по возрастанию: {mas}')
# After an ascending sort the median of the odd-length list is its middle element.
print(f'Медиана: {mas[len(mas) // 2]}')
# Cheat variant :) -- cross-check against the standard library.
import statistics
print(statistics.median(mas))
|
flexible
|
{
"blob_id": "fbcbad9f64c0f9b68e29afde01f3a4fdba012e10",
"index": 4868,
"step-1": "<mask token>\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n<mask token>\nprint(statistics.median(mas))\n",
"step-3": "<mask token>\nm = random.randint(5, 10)\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n<mask token>\nprint(statistics.median(mas))\n",
"step-4": "<mask token>\nimport random\nm = random.randint(5, 10)\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\ndef heapify(array, size, ind):\n largest = ind\n left = 2 * ind + 1\n right = 2 * ind + 2\n if left < size and array[left] > array[largest]:\n largest = left\n if right < size and array[right] > array[largest]:\n largest = right\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\nimport statistics\nprint(statistics.median(mas))\n",
"step-5": "\"\"\"\nМассив размером 2m + 1, где m — натуральное число, заполнен случайным образом. Найдите в массиве медиану.\nМедианой называется элемент ряда, делящий его на две равные части:\nв одной находятся элементы, которые не меньше медианы, в другой — не больше медианы.\nПримечание: задачу можно решить без сортировки исходного массива.\nНо если это слишком сложно, используйте метод сортировки, который не рассматривался на уроках\n(сортировка слиянием также недопустима).\n\"\"\"\n\n\n\"\"\"В этой задаче как раз могла бы пригодиться быстрая сортировка Хоара или слиянием.\n\"Но без них не знаю, как можно написать более менее оптимизировано\"\"\"\n\nimport random\n\nm = random.randint(5, 10)\n# \"одномерный вещественный массив, заданный случайными числами на промежутке [0; 50)\" - т.е. [0; 49].\n# Не знаю, важно ли это. uniform включает последнее число, в отличии от range и большинства прочих функций\n# Для лучшей читабельности добавил округление\nmas = [round(random.uniform(0, 49), 3) for i in range(2 * m + 1)]\nprint(f'Исходный список: {mas}')\n\n\n# Через сортировку кучей\ndef heapify(array, size, ind):\n largest = ind\n left = (2 * ind) + 1\n right = (2 * ind) + 2\n\n if left < size and array[left] > array[largest]:\n largest = left\n\n if right < size and array[right] > array[largest]:\n largest = right\n\n if largest != ind:\n array[ind], array[largest] = array[largest], array[ind]\n heapify(array, size, largest)\n\n\ndef heap_sort(array):\n n = len(array)\n for i in range(n, -1, -1):\n heapify(array, n, i)\n\n for i in range(n - 1, 0, -1):\n array[i], array[0] = array[0], array[i]\n heapify(array, i, 0)\n\n\nheap_sort(mas)\nprint(f'Отсортированный список по возрастанию: {mas}')\nprint(f'Медиана: {mas[len(mas) // 2]}')\n\n\n# Читерский вариант :)\nimport statistics\n\nprint(statistics.median(mas))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .facebook import *
|
flexible
|
{
"blob_id": "7901a2bd4ae1070c8263d3cd97351b01ffbf7bb1",
"index": 7246,
"step-1": "<mask token>\n",
"step-2": "from .facebook import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def is_list_equal(a, b):
if a == b:
print('CORRECT answer! :) ')
time.sleep(2)
print('See you next time !')
time.sleep(3)
return True
else:
print('This is a WRONG answer !')
time.sleep(2)
print('See you next time ! :)')
time.sleep(3)
return False
def play_memory_game(user_input):
print('****** Welcome to the Memory Game! ******' + '\n')
a = generate_sequence(user_input)
b = get_list_from_user(user_input)
if is_list_equal(a, b) == True:
return True
else:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_sequence(difficulty):
print('Try to remember the numbers! : ')
random_list = random.sample(range(1, 101), difficulty)
time.sleep(2)
print(random_list)
time.sleep(0.7)
os.system('cls')
time.sleep(3)
return random_list
<|reserved_special_token_0|>
def is_list_equal(a, b):
if a == b:
print('CORRECT answer! :) ')
time.sleep(2)
print('See you next time !')
time.sleep(3)
return True
else:
print('This is a WRONG answer !')
time.sleep(2)
print('See you next time ! :)')
time.sleep(3)
return False
def play_memory_game(user_input):
print('****** Welcome to the Memory Game! ******' + '\n')
a = generate_sequence(user_input)
b = get_list_from_user(user_input)
if is_list_equal(a, b) == True:
return True
else:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_sequence(difficulty):
print('Try to remember the numbers! : ')
random_list = random.sample(range(1, 101), difficulty)
time.sleep(2)
print(random_list)
time.sleep(0.7)
os.system('cls')
time.sleep(3)
return random_list
def get_list_from_user(difficulty):
print(
'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '
)
user_list = []
for i in range(0, difficulty):
user_num = int(input('num: '))
user_list.append(user_num)
print('Your chosen numbers are : ' + str(user_list))
time.sleep(3)
return user_list
def is_list_equal(a, b):
if a == b:
print('CORRECT answer! :) ')
time.sleep(2)
print('See you next time !')
time.sleep(3)
return True
else:
print('This is a WRONG answer !')
time.sleep(2)
print('See you next time ! :)')
time.sleep(3)
return False
def play_memory_game(user_input):
print('****** Welcome to the Memory Game! ******' + '\n')
a = generate_sequence(user_input)
b = get_list_from_user(user_input)
if is_list_equal(a, b) == True:
return True
else:
return False
<|reserved_special_token_1|>
import time
import os
import random
def generate_sequence(difficulty):
print('Try to remember the numbers! : ')
random_list = random.sample(range(1, 101), difficulty)
time.sleep(2)
print(random_list)
time.sleep(0.7)
os.system('cls')
time.sleep(3)
return random_list
def get_list_from_user(difficulty):
print(
'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '
)
user_list = []
for i in range(0, difficulty):
user_num = int(input('num: '))
user_list.append(user_num)
print('Your chosen numbers are : ' + str(user_list))
time.sleep(3)
return user_list
def is_list_equal(a, b):
if a == b:
print('CORRECT answer! :) ')
time.sleep(2)
print('See you next time !')
time.sleep(3)
return True
else:
print('This is a WRONG answer !')
time.sleep(2)
print('See you next time ! :)')
time.sleep(3)
return False
def play_memory_game(user_input):
print('****** Welcome to the Memory Game! ******' + '\n')
a = generate_sequence(user_input)
b = get_list_from_user(user_input)
if is_list_equal(a, b) == True:
return True
else:
return False
<|reserved_special_token_1|>
import time
import os
import random
def generate_sequence(difficulty):
    """Show the player *difficulty* distinct random numbers from 1..100,
    clear the console, and return the hidden sequence to be recalled.

    Raises ValueError (from random.sample) when difficulty > 100.
    """
    print("Try to remember the numbers! : ")
    sequence = random.sample(range(1, 101), difficulty)
    time.sleep(2)
    print(sequence)
    time.sleep(0.7)
    # Fix: 'cls' exists only on Windows, so on POSIX the numbers were never
    # cleared from the screen; pick the platform's clear command.
    os.system('cls' if os.name == 'nt' else 'clear')
    time.sleep(3)
    return sequence
def get_list_from_user(difficulty):
    """Prompt the player for *difficulty* integers, one per line, and return
    them in the order they were entered."""
    print("WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : ")
    guesses = [int(input('num: ')) for _ in range(difficulty)]
    print("Your chosen numbers are : " + str(guesses))
    time.sleep(3)
    return guesses
def is_list_equal(a, b):
    """Compare the generated sequence with the player's guesses, announce
    the outcome on stdout, and return True on an exact match."""
    matched = a == b
    if matched:
        verdict, farewell = "CORRECT answer! :) ", "See you next time !"
    else:
        verdict, farewell = "This is a WRONG answer !", "See you next time ! :)"
    print(verdict)
    time.sleep(2)
    print(farewell)
    time.sleep(3)
    return matched
def play_memory_game(user_input):
    """Run one round of the memory game at difficulty *user_input*.

    Returns True when the player reproduces the generated sequence exactly,
    False otherwise.
    """
    print("****** Welcome to the Memory Game! ******" + "\n")
    shown = generate_sequence(user_input)
    guessed = get_list_from_user(user_input)
    # is_list_equal already returns a bool; the original compared it to True
    # and re-derived True/False through an if/else.
    return is_list_equal(shown, guessed)
|
flexible
|
{
"blob_id": "bff9fb50f1901094c9ab3d61566509835c774f21",
"index": 6776,
"step-1": "<mask token>\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-2": "<mask token>\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\n<mask token>\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-3": "<mask token>\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\n 'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '\n )\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print('Your chosen numbers are : ' + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-4": "import time\nimport os\nimport random\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\n 'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '\n )\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print('Your chosen numbers are : ' + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-5": "import time\nimport os\nimport random\n\n\ndef generate_sequence(difficulty):\n print(\"Try to remember the numbers! : \")\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\"WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : \")\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print(\"Your chosen numbers are : \" + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print(\"CORRECT answer! :) \")\n time.sleep(2)\n print(\"See you next time !\")\n time.sleep(3)\n return True\n else:\n print(\"This is a WRONG answer !\")\n time.sleep(2)\n print(\"See you next time ! :)\")\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print(\"****** Welcome to the Memory Game! ******\" + \"\\n\")\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getData(request):
index = request.GET.get('index')
msg = '未查找到数据'
if ExtExecute.objects.filter(query_code=index):
ext = ExtExecute.objects.filter(query_code=index).first()
result = getpicture(ext.upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = ext.extSubmit.subProject
dataset = ext.sampleinfoext_set.all()
type = 1
elif LibExecute.objects.filter(query_code=index):
result = getpicture(LibExecute.objects.filter(query_code=index).
first().upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = LibExecute.objects.filter(query_code=index).first(
).libSubmit.subProject
dataset = LibExecute.objects.filter(query_code=index).first(
).sampleinfolib_set.all()
type = 2
elif SeqExecute.objects.filter(query_code=index):
subject = SeqExecute.objects.filter(query_code=index).first(
).seqSubmit.subProject
dataset = SeqExecute.objects.filter(query_code=index).first(
).sampleinfoseq_set.all()
type = 3
return render(request, 'Showdata.html', {'data': dataset, 'type':
type, 'subject': subject})
else:
return render(request, 'Showdata.html', {'error': msg})
return render(request, 'Showdata.html', {'data': dataset, 'type': type,
'subject': subject, 'pic': result})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getpicture(word):
if word.split('.')[1] not in ['doc', 'docx']:
return None
word_zip = word.split('.')[0] + '.zip'
path = ''
for i in word.split('/')[0:-1]:
path += i
path += '/'
path += 'tem/'
if not os.path.exists(path):
os.rename(word, word_zip)
f = zipfile.ZipFile(word_zip, 'r')
for file in f.filelist:
f.extract(file, path)
f.close()
os.rename(word_zip, word)
pic = os.listdir(os.path.join(path, 'word/media'))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path, 'word/media/') + i)
for j in result:
url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(
'/media/')[2]
result_.append(url)
return result_
else:
pic = os.listdir(os.path.join(path, 'word/media'))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path, 'word/media/') + i)
for j in result:
url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(
'/media/')[2]
result_.append(url)
return result_
def getData(request):
index = request.GET.get('index')
msg = '未查找到数据'
if ExtExecute.objects.filter(query_code=index):
ext = ExtExecute.objects.filter(query_code=index).first()
result = getpicture(ext.upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = ext.extSubmit.subProject
dataset = ext.sampleinfoext_set.all()
type = 1
elif LibExecute.objects.filter(query_code=index):
result = getpicture(LibExecute.objects.filter(query_code=index).
first().upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = LibExecute.objects.filter(query_code=index).first(
).libSubmit.subProject
dataset = LibExecute.objects.filter(query_code=index).first(
).sampleinfolib_set.all()
type = 2
elif SeqExecute.objects.filter(query_code=index):
subject = SeqExecute.objects.filter(query_code=index).first(
).seqSubmit.subProject
dataset = SeqExecute.objects.filter(query_code=index).first(
).sampleinfoseq_set.all()
type = 3
return render(request, 'Showdata.html', {'data': dataset, 'type':
type, 'subject': subject})
else:
return render(request, 'Showdata.html', {'error': msg})
return render(request, 'Showdata.html', {'data': dataset, 'type': type,
'subject': subject, 'pic': result})
<|reserved_special_token_1|>
import datetime
from django.shortcuts import render
from lims.models import *
import os
import zipfile
def getpicture(word):
if word.split('.')[1] not in ['doc', 'docx']:
return None
word_zip = word.split('.')[0] + '.zip'
path = ''
for i in word.split('/')[0:-1]:
path += i
path += '/'
path += 'tem/'
if not os.path.exists(path):
os.rename(word, word_zip)
f = zipfile.ZipFile(word_zip, 'r')
for file in f.filelist:
f.extract(file, path)
f.close()
os.rename(word_zip, word)
pic = os.listdir(os.path.join(path, 'word/media'))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path, 'word/media/') + i)
for j in result:
url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(
'/media/')[2]
result_.append(url)
return result_
else:
pic = os.listdir(os.path.join(path, 'word/media'))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path, 'word/media/') + i)
for j in result:
url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(
'/media/')[2]
result_.append(url)
return result_
def getData(request):
index = request.GET.get('index')
msg = '未查找到数据'
if ExtExecute.objects.filter(query_code=index):
ext = ExtExecute.objects.filter(query_code=index).first()
result = getpicture(ext.upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = ext.extSubmit.subProject
dataset = ext.sampleinfoext_set.all()
type = 1
elif LibExecute.objects.filter(query_code=index):
result = getpicture(LibExecute.objects.filter(query_code=index).
first().upload_file.path)
if result:
for i in result:
i = request.META.get('HTTP_HOST') + i
subject = LibExecute.objects.filter(query_code=index).first(
).libSubmit.subProject
dataset = LibExecute.objects.filter(query_code=index).first(
).sampleinfolib_set.all()
type = 2
elif SeqExecute.objects.filter(query_code=index):
subject = SeqExecute.objects.filter(query_code=index).first(
).seqSubmit.subProject
dataset = SeqExecute.objects.filter(query_code=index).first(
).sampleinfoseq_set.all()
type = 3
return render(request, 'Showdata.html', {'data': dataset, 'type':
type, 'subject': subject})
else:
return render(request, 'Showdata.html', {'error': msg})
return render(request, 'Showdata.html', {'data': dataset, 'type': type,
'subject': subject, 'pic': result})
<|reserved_special_token_1|>
import datetime
from django.shortcuts import render
from lims.models import *
import os
import zipfile
def getpicture(word):
    """Extract the pictures embedded in the Word document at path *word*.

    A .docx file is a zip archive whose images live under ``word/media``.
    On first call the archive is unpacked into a ``tem/`` directory next to
    the document; later calls reuse that cache.  Returns a list of
    site-relative ``/media/...`` URLs, or None when *word* is not a .doc or
    .docx file.
    """
    base, ext = os.path.splitext(word)
    # The original checked word.split(".")[1], which misfires whenever the
    # path contains more than one dot; splitext inspects the real extension.
    if ext.lstrip(".").lower() not in ("doc", "docx"):
        return None
    # Cache directory "tem/" alongside the document.
    path = os.path.join(os.path.dirname(word), "tem") + "/"
    if not os.path.exists(path):
        # Temporarily rename to .zip for extraction (mirrors the original
        # flow) and guarantee the document gets its name back on failure.
        word_zip = base + ".zip"
        os.rename(word, word_zip)
        try:
            with zipfile.ZipFile(word_zip, "r") as archive:
                for member in archive.filelist:
                    archive.extract(member, path)
        finally:
            os.rename(word_zip, word)
    return _extracted_media_urls(path)


def _extracted_media_urls(path):
    """Turn every file under ``<path>/word/media`` into a ``/media/...`` URL.

    Keeps the original split-based construction, so it assumes the document
    lives beneath a ``/media/`` directory (Django MEDIA_ROOT) -- TODO
    confirm against the deployment layout.
    """
    media_dir = os.path.join(path, "word/media")
    urls = []
    for name in os.listdir(media_dir):
        full_path = os.path.join(media_dir, name)
        pieces = full_path.split("/media/")
        urls.append("/media/" + pieces[1] + "/media/" + pieces[2])
    return urls
def getData(request):
    """Render the data sheet for a QC record identified by ``?index=``.

    The query code is matched against extraction, library-construction and
    sequencing executions in turn; the first hit selects the template
    context (``type`` 1, 2 or 3).  Extraction and library records may also
    carry pictures pulled out of the uploaded Word report.
    """
    index = request.GET.get("index")
    msg = "未查找到数据"  # "no data found" -- rendered when nothing matches
    if ExtExecute.objects.filter(query_code=index):
        ext = ExtExecute.objects.filter(query_code=index).first()
        result = getpicture(ext.upload_file.path)
        if result:
            # NOTE(review): this loop is a no-op -- rebinding the loop
            # variable never changes ``result``.  Presumably the intent was
            # to prefix each URL with the request host; confirm and fix.
            for i in result:
                i = request.META.get("HTTP_HOST") + i
            subject = ext.extSubmit.subProject
            dataset = ext.sampleinfoext_set.all()
            type = 1  # NOTE(review): shadows the builtin ``type``
        # NOTE(review): when ``result`` is falsy, subject/dataset/type are
        # never bound and the final render below raises NameError.
    elif LibExecute.objects.filter(query_code=index):
        result = getpicture(LibExecute.objects.filter(query_code=index).first().upload_file.path)
        if result:
            # NOTE(review): same no-op host-prefix loop as above.
            for i in result:
                i = request.META.get("HTTP_HOST") + i
            subject = LibExecute.objects.filter(query_code=index).first().libSubmit.subProject
            dataset = LibExecute.objects.filter(query_code=index).first().sampleinfolib_set.all()
            type = 2
    elif SeqExecute.objects.filter(query_code=index):
        # Sequencing records render directly, without extracting pictures.
        subject = SeqExecute.objects.filter(query_code=index).first().seqSubmit.subProject
        dataset = SeqExecute.objects.filter(query_code=index).first().sampleinfoseq_set.all()
        type = 3
        return render(request, "Showdata.html", {"data": dataset, "type": type, "subject": subject})
    else:
        return render(request,"Showdata.html",{"error":msg})
    return render(request,"Showdata.html",{"data":dataset,"type":type,"subject":subject,"pic":result})
|
flexible
|
{
"blob_id": "e32c73abdcd384ee7c369182527cca6495f067b3",
"index": 1977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-3": "<mask token>\n\n\ndef getpicture(word):\n if word.split('.')[1] not in ['doc', 'docx']:\n return None\n word_zip = word.split('.')[0] + '.zip'\n path = ''\n for i in word.split('/')[0:-1]:\n path += i\n path += '/'\n path += 'tem/'\n if not os.path.exists(path):\n os.rename(word, word_zip)\n f = zipfile.ZipFile(word_zip, 'r')\n for file in f.filelist:\n f.extract(file, path)\n f.close()\n os.rename(word_zip, word)\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n else:\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = 
SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-4": "import datetime\nfrom django.shortcuts import render\nfrom lims.models import *\nimport os\nimport zipfile\n\n\ndef getpicture(word):\n if word.split('.')[1] not in ['doc', 'docx']:\n return None\n word_zip = word.split('.')[0] + '.zip'\n path = ''\n for i in word.split('/')[0:-1]:\n path += i\n path += '/'\n path += 'tem/'\n if not os.path.exists(path):\n os.rename(word, word_zip)\n f = zipfile.ZipFile(word_zip, 'r')\n for file in f.filelist:\n f.extract(file, path)\n f.close()\n os.rename(word_zip, word)\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n else:\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = 
SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-5": "import datetime\r\n\r\nfrom django.shortcuts import render\r\nfrom lims.models import *\r\nimport os\r\nimport zipfile\r\n\r\ndef getpicture(word):\r\n if word.split(\".\")[1] not in [\"doc\",\"docx\"]:\r\n return None\r\n word_zip = word.split(\".\")[0] + \".zip\"\r\n path = \"\"\r\n for i in word.split(\"/\")[0:-1]:\r\n path += i\r\n path += \"/\"\r\n path += \"tem/\"\r\n if not os.path.exists(path):\r\n os.rename(word,word_zip)\r\n f = zipfile.ZipFile(word_zip,\"r\")\r\n for file in f.filelist:\r\n f.extract(file,path)\r\n f.close()\r\n os.rename(word_zip,word)\r\n pic = os.listdir(os.path.join(path,\"word/media\"))\r\n result = []\r\n result_ = []\r\n for i in pic:\r\n result.append(os.path.join(path,\"word/media/\") + i)\r\n for j in result:\r\n url = \"/media/\" + j.split(\"/media/\")[1] + \"/media/\" + j.split(\"/media/\")[2]\r\n result_.append(url)\r\n return result_\r\n else:\r\n pic = os.listdir(os.path.join(path, \"word/media\"))\r\n result = []\r\n result_ = []\r\n for i in pic:\r\n result.append(os.path.join(path, \"word/media/\") + i)\r\n for j in result:\r\n url = \"/media/\" + j.split(\"/media/\")[1] + \"/media/\" +j.split(\"/media/\")[2]\r\n result_.append(url)\r\n return result_\r\n\r\n\r\ndef getData(request):\r\n index = request.GET.get(\"index\")\r\n msg = \"未查找到数据\"\r\n if ExtExecute.objects.filter(query_code=index):\r\n ext = ExtExecute.objects.filter(query_code=index).first()\r\n result = getpicture(ext.upload_file.path)\r\n if result:\r\n for i in result:\r\n i = request.META.get(\"HTTP_HOST\") + i\r\n subject = ext.extSubmit.subProject\r\n dataset = ext.sampleinfoext_set.all()\r\n type = 1\r\n elif LibExecute.objects.filter(query_code=index):\r\n result = getpicture(LibExecute.objects.filter(query_code=index).first().upload_file.path)\r\n if result:\r\n for i in result:\r\n i = request.META.get(\"HTTP_HOST\") + i\r\n subject = LibExecute.objects.filter(query_code=index).first().libSubmit.subProject\r\n dataset = 
LibExecute.objects.filter(query_code=index).first().sampleinfolib_set.all()\r\n type = 2\r\n elif SeqExecute.objects.filter(query_code=index):\r\n subject = SeqExecute.objects.filter(query_code=index).first().seqSubmit.subProject\r\n dataset = SeqExecute.objects.filter(query_code=index).first().sampleinfoseq_set.all()\r\n type = 3\r\n return render(request, \"Showdata.html\", {\"data\": dataset, \"type\": type, \"subject\": subject})\r\n else:\r\n return render(request,\"Showdata.html\",{\"error\":msg})\r\n return render(request,\"Showdata.html\",{\"data\":dataset,\"type\":type,\"subject\":subject,\"pic\":result})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python2
import unittest
import luna_utils as luna
import time
# Luna service endpoint prefix of the video output service under test.
API_URL = "com.webos.service.videooutput/"
# When True, the vlog() helper prints per-step progress messages.
VERBOSE_LOG = True
# Whether the service requires register/unregister of pipeline contexts.
SUPPORT_REGISTER = False
# Video sink identifiers exposed by the service.
SINK_MAIN = "MAIN"
SINK_SUB = "SUB0"
#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state
#SINK_LIST = [SINK_MAIN, SINK_SUB]
SINK_LIST = [SINK_MAIN]
# Pipeline context ids used only when SUPPORT_REGISTER is True.
PID1 = "pipeline1"
PID2 = "pipeline2"
PID_LIST = [PID1, PID2]
# Source crop rectangle and output placement rectangle used by window tests.
INPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}
OUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}
#Choose source type VDEC or HDMI for test input
#SOURCE_NAME = SOURCE_NAME
#SOURCE_PORT = 0
SOURCE_NAME = "HDMI"
SOURCE_PORT = 3
# Nominal resolution of the test source signal.
SOURCE_WIDTH = 1920
SOURCE_HEIGHT = 1080
# Seconds to keep video visible at the end of display tests.
SLEEP_TIME = 1
class TestVideoMethods(luna.TestBase):
    """End-to-end tests for the com.webos.service.videooutput luna service.

    Each test issues luna calls (connect/disconnect/blankVideo/setVideoData/
    display/*) and verifies, through the getStatus subscription, that the
    service reports the expected state transition.
    """

    def vlog(self, message):
        """Print *message* only when VERBOSE_LOG is enabled."""
        if VERBOSE_LOG:
            print(message)

    def setUp(self):
        """Optionally register pipeline contexts and open the status subscription."""
        self.vlog("setUp")
        if SUPPORT_REGISTER:
            for pid in PID_LIST:
                self.vlog("register " + pid)
                luna.call(API_URL + "register", { "context": pid })
        # Every test observes service state changes through this subscription.
        self.statusSub = luna.subscribe(API_URL + "getStatus", {"subscribe":True})

    def tearDown(self):
        """Disconnect all sinks, unregister contexts and cancel the subscription."""
        self.vlog("tearDown")
        for sink in SINK_LIST:
            self.vlog("disconnect " + sink)
            luna.call(API_URL + "disconnect", { "sink": sink })
        if SUPPORT_REGISTER:
            for pid in PID_LIST:
                self.vlog("unregister " + pid)
                luna.call(API_URL + "unregister", { "context": pid })
        luna.cancelSubscribe(self.statusSub)

    def connect(self, sink, source, port, pid):
        """Connect *source*/*port* to *sink* and expect a matching status update."""
        self.vlog("connect " + sink)
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "connect",
            { "outputMode": "DISPLAY", "sink": sink, "source": source, "sourcePort": port },
            self.statusSub,
            {"video":[{"sink": sink, "connectedSource": source, "connectedSourcePort": port}]})

    def mute(self, sink, blank):
        """Blank (mute) or unblank video on *sink* and expect the muted flag to follow."""
        # NOTE: space added so the log reads "- Mute MAIN" instead of "- MuteMAIN".
        self.vlog("- Mute " + sink)
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "blankVideo",
            {"sink": sink, "blank": blank},
            self.statusSub,
            {"video":[{"sink": sink, "muted": blank}]})

    def disconnect(self, sink, pid):
        """Disconnect *sink* and expect its connectedSource to become None."""
        self.vlog("disconnect " + sink)
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "disconnect", { "sink": sink },
            self.statusSub,
            {"video": [{"sink": sink, "connectedSource": None}]})

    def testConnectDisconnect(self):
        """Repeatedly connect and disconnect every source/port/sink combination."""
        print("[testConnectDisconnect]")
        # .items() iterates identically on Python 2 and 3 (iteritems() is py2-only).
        for source, ports in {"VDEC":[0,1], "HDMI":[0,1,2]}.items():
            for port in ports:
                for sink in SINK_LIST:
                    for i in range(3):
                        self.connect(sink, source, port, "")
                        self.disconnect(sink, "")

    def testDualConnect(self):
        """Connect the same source to MAIN and SUB simultaneously (when SUB is enabled)."""
        print("[testDualConnect]")
        self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
        if len(SINK_LIST) > 1:
            self.checkLunaCallSuccessAndSubscriptionUpdate(
                API_URL + "connect",
                {"outputMode": "DISPLAY", "sink": SINK_SUB, "source": SOURCE_NAME, "sourcePort": SOURCE_PORT},
                self.statusSub,
                {"video": [{"sink": SINK_MAIN, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT},
                           {"sink": SINK_SUB, "connectedSource": SOURCE_NAME, "connectedSourcePort": SOURCE_PORT}]})
        self.disconnect(SINK_MAIN, "")
        if len(SINK_LIST) > 1:
            self.disconnect(SINK_SUB, "")

    def testMute(self):
        """Toggle blanking on and off for every sink."""
        print("[testMute]")
        for sink in SINK_LIST:
            self.connect(sink, SOURCE_NAME, SOURCE_PORT, "")
            for blank in [False, True]:
                self.mute(sink, blank)

    #test different orders of display window and media data
    def testSetDisplayWindowAndVideoData(self):
        """Set the display window first, then deliver media data."""
        print("[testSetDisplayWindowAndVideoData]")
        self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "display/setDisplayWindow",
            {"sink": SINK_MAIN,
             "fullScreen": False,
             "sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
             "displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": False,
                       "width":0,
                       "height":0,
                       "frameRate":0,
                       # No media data yet, so the applied sourceInput cannot be determined.
                       "sourceInput": {"x":0, "y":0, "width":0, "height":0},
                       "displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
                       }]})
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "setVideoData",
            {"sink": SINK_MAIN,
             "contentType": "media",
             "frameRate":29.5,
             "width":SOURCE_WIDTH,
             "height":SOURCE_HEIGHT,
             "scanType":"progressive",
             "adaptive": False},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": False,
                       "width":SOURCE_WIDTH,
                       "height":SOURCE_HEIGHT,
                       "frameRate":29.5,
                       "sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
                       "displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
                       }]})
        self.mute(SINK_MAIN, False)
        time.sleep(SLEEP_TIME)

    def testSetVideoDataAndDisplayWindow(self):
        """Deliver media data first, then set the display window."""
        print("[testSetVideoDataAndDisplayWindow]")
        self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "setVideoData",
            {"sink": SINK_MAIN,
             "contentType": "media",
             "frameRate":29.5,
             "width":SOURCE_WIDTH,
             "height":SOURCE_HEIGHT,
             "scanType":"progressive",
             "adaptive": False},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": False,
                       "width":SOURCE_WIDTH,
                       "height":SOURCE_HEIGHT,
                       "frameRate":29.5,
                       # No window set yet, so applied rectangles are still zero.
                       "sourceInput": {"x":0, "y":0, "width":0, "height":0},
                       "displayOutput": {"x":0, "y":0, "width":0, "height":0}
                       }]})
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "display/setDisplayWindow",
            {"sink": SINK_MAIN,
             "fullScreen": False,
             "sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
             "displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": False,
                       "width":SOURCE_WIDTH,
                       "height":SOURCE_HEIGHT,
                       "frameRate":29.5,
                       "sourceInput": {"x":INPUT_RECT['X'], "y":INPUT_RECT['Y'], "width":INPUT_RECT['W'], "height":INPUT_RECT['H']},
                       "displayOutput": {"x":OUTPUT_RECT['X'], "y":OUTPUT_RECT['Y'], "width":OUTPUT_RECT['W'], "height":OUTPUT_RECT['H']}
                       }]})
        self.mute(SINK_MAIN, False)
        time.sleep(SLEEP_TIME)

    def testSetFullscreen(self):
        """Set a full-screen window and expect the full display output rectangle."""
        print("[testSetFullscreen]")
        self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "setVideoData",
            {"sink": SINK_MAIN,
             "contentType": "media",
             "frameRate":29.5,
             "width":SOURCE_WIDTH,
             "height":SOURCE_HEIGHT,
             "scanType":"progressive",
             "adaptive": False},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": False,
                       "width":SOURCE_WIDTH,
                       "height":SOURCE_HEIGHT,
                       "frameRate":29.5,
                       "sourceInput": {"x":0, "y":0, "width":0, "height":0},
                       "displayOutput": {"x":0, "y":0, "width":0, "height":0}
                       }]})
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "display/setDisplayWindow",
            {"sink": SINK_MAIN,
             "fullScreen": True,
             "sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT}},
            self.statusSub,
            {"video":[{"sink": SINK_MAIN,
                       "fullScreen": True,
                       "width":SOURCE_WIDTH,
                       "height":SOURCE_HEIGHT,
                       "frameRate":29.5,
                       "sourceInput": {"x":0, "y":0, "width":SOURCE_WIDTH, "height":SOURCE_HEIGHT},
                       # Full screen maps to the panel resolution (4K UHD).
                       "displayOutput": {"x":0, "y":0, "width":3840, "height":2160}
                       }]})
        self.mute(SINK_MAIN, False)
        time.sleep(SLEEP_TIME)

    def testSetCompositing(self):
        """Exercise opacity/zOrder compositing via setCompositing and setDisplayWindow."""
        print("[testSetCompositing]")
        self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, "")
        if len(SINK_LIST) > 1:
            self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, "")
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "display/setCompositing",
            {"composeOrder": [{"sink":SINK_MAIN, "opacity":20, "zOrder":1},
                              {"sink":SINK_SUB, "opacity":31, "zOrder":0}]},
            self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":20, "zOrder":1}]})
        # setDisplayWindow may also update opacity in place.
        self.checkLunaCallSuccessAndSubscriptionUpdate(
            API_URL + "display/setDisplayWindow",
            {"sink": SINK_MAIN, "fullScreen":True, "opacity":130},
            self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":1}]})
        if len(SINK_LIST) > 1:
            self.checkLunaCallSuccessAndSubscriptionUpdate(
                API_URL + "display/setDisplayWindow",
                {"sink": SINK_SUB, "fullScreen":True, "opacity":200},
                self.statusSub, {"video":[{"sink": SINK_SUB, "opacity":200, "zOrder":0}]})
            self.checkLunaCallSuccessAndSubscriptionUpdate(
                API_URL + "display/setDisplayWindow",
                {"sink": SINK_SUB, "fullScreen":True, "opacity":230},
                self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":0}, {"sink": SINK_SUB, "opacity":230, "zOrder":1}]})
            self.checkLunaCallSuccessAndSubscriptionUpdate(
                API_URL + "display/setDisplayWindow",
                {"sink": SINK_SUB, "fullScreen":True, "opacity":30, "zOrder": 1},
                self.statusSub, {"video":[{"sink": SINK_MAIN, "opacity":130, "zOrder":0}, {"sink": SINK_SUB, "opacity":30, "zOrder":1}]})
if __name__ == '__main__':
    # Keep luna transport logging quiet; per-step logging is controlled by VERBOSE_LOG.
    luna.VERBOSE = False
    unittest.main()
|
normal
|
{
"blob_id": "27e66b2a03bc626d5babd804e736a4652ba030d5",
"index": 8624,
"step-1": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n <mask token>\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n <mask token>\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n <mask token>\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 
'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, 
self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 
'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': 
OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': 
SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n 
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 
'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': 
OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': 
SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n 
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-4": "import unittest\nimport luna_utils as luna\nimport time\nAPI_URL = 'com.webos.service.videooutput/'\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\nSINK_MAIN = 'MAIN'\nSINK_SUB = 'SUB0'\nSINK_LIST = [SINK_MAIN]\nPID1 = 'pipeline1'\nPID2 = 'pipeline2'\nPID_LIST = [PID1, PID2]\nINPUT_RECT = {'X': 0, 'Y': 0, 'W': 1920, 'H': 1080}\nOUTPUT_RECT = {'X': 400, 'Y': 400, 'W': 1920, 'H': 1080}\nSOURCE_NAME = 'HDMI'\nSOURCE_PORT = 3\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\nSLEEP_TIME = 1\n\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog('setUp')\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('register ' + pid)\n luna.call(API_URL + 'register', {'context': pid})\n self.statusSub = luna.subscribe(API_URL + 'getStatus', {'subscribe':\n True})\n\n def tearDown(self):\n self.vlog('tearDown')\n for sink in SINK_LIST:\n self.vlog('disconnect ' + sink)\n luna.call(API_URL + 'disconnect', {'sink': sink})\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog('unregister ' + pid)\n luna.call(API_URL + 'unregister', {'context': pid})\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog('connect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + 'connect',\n {'outputMode': 'DISPLAY', 'sink': sink, 'source': source,\n 'sourcePort': port}, self.statusSub, {'video': [{'sink': sink,\n 'connectedSource': source, 'connectedSourcePort': port}]})\n\n def mute(self, sink, blank):\n self.vlog('- Mute' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'blankVideo', {'sink': sink, 'blank': blank}, self.statusSub, {\n 'video': [{'sink': sink, 'muted': blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog('disconnect ' + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'disconnect', {'sink': sink}, self.statusSub, {'video': [{\n 'sink': sink, 'connectedSource': None}]})\n\n def 
testConnectDisconnect(self):\n print('[testConnectDisconnect]')\n for source, ports in {'VDEC': [0, 1], 'HDMI': [0, 1, 2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, '')\n self.disconnect(sink, '')\n\n def testDualConnect(self):\n print('[testDualConnect]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'connect', {'outputMode': 'DISPLAY', 'sink': SINK_SUB,\n 'source': SOURCE_NAME, 'sourcePort': SOURCE_PORT}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'connectedSource':\n SOURCE_NAME, 'connectedSourcePort': SOURCE_PORT}, {'sink':\n SINK_SUB, 'connectedSource': SOURCE_NAME,\n 'connectedSourcePort': SOURCE_PORT}]})\n self.disconnect(SINK_MAIN, '')\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, '')\n\n def testMute(self):\n print('[testMute]')\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, '')\n for blank in [False, True]:\n self.mute(sink, blank)\n\n def testSetDisplayWindowAndVideoData(self):\n print('[testSetDisplayWindowAndVideoData]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': 0, 'height': 0, 'frameRate': 0, 'sourceInput': {'x': 0,\n 'y': 0, 'width': 0, 'height': 0}, 'displayOutput': {'x':\n OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT[\n 'W'], 'height': OUTPUT_RECT['H']}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 
'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': 'MAIN', 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}, 'displayOutput': {'x': OUTPUT_RECT[\n 'X'], 'y': OUTPUT_RECT['Y'], 'width': OUTPUT_RECT['W'],\n 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print('[testSetVideoDataAndDisplayWindow]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': 'MAIN', 'fullScreen': \n False, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT[\n 'Y'], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}, self.\n statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': False,\n 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT, 'frameRate': \n 29.5, 'sourceInput': {'x': INPUT_RECT['X'], 'y': INPUT_RECT['Y'\n ], 'width': INPUT_RECT['W'], 'height': INPUT_RECT['H']},\n 'displayOutput': {'x': OUTPUT_RECT['X'], 'y': OUTPUT_RECT['Y'],\n 'width': OUTPUT_RECT['W'], 'height': OUTPUT_RECT['H']}}]})\n self.mute(SINK_MAIN, False)\n 
time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print('[testSetFullscreen]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'setVideoData', {'sink': SINK_MAIN, 'contentType': 'media',\n 'frameRate': 29.5, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'scanType': 'progressive', 'adaptive': False},\n self.statusSub, {'video': [{'sink': SINK_MAIN, 'fullScreen': \n False, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT,\n 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}, 'displayOutput': {'x': 0, 'y': 0, 'width': 0,\n 'height': 0}}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'sourceInput': {'x': 0, 'y': 0, 'width': SOURCE_WIDTH,\n 'height': SOURCE_HEIGHT}}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'fullScreen': True, 'width': SOURCE_WIDTH, 'height':\n SOURCE_HEIGHT, 'frameRate': 29.5, 'sourceInput': {'x': 0, 'y': \n 0, 'width': SOURCE_WIDTH, 'height': SOURCE_HEIGHT},\n 'displayOutput': {'x': 0, 'y': 0, 'width': 3840, 'height': 2160}}]}\n )\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print('[testSetCompositing]')\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, '')\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, '')\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setCompositing', {'composeOrder': [{'sink': SINK_MAIN,\n 'opacity': 20, 'zOrder': 1}, {'sink': SINK_SUB, 'opacity': 31,\n 'zOrder': 0}]}, self.statusSub, {'video': [{'sink': 'MAIN',\n 'opacity': 20, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_MAIN, 'fullScreen': \n True, 'opacity': 130}, self.statusSub, {'video': [{'sink':\n SINK_MAIN, 'opacity': 130, 'zOrder': 1}]})\n if len(SINK_LIST) > 1:\n 
self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 200}, self.statusSub, {'video': [{'sink':\n 'SUB0', 'opacity': 200, 'zOrder': 0}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 230}, self.statusSub, {'video': [{'sink':\n 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink': 'SUB0',\n 'opacity': 230, 'zOrder': 1}]})\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL +\n 'display/setDisplayWindow', {'sink': SINK_SUB, 'fullScreen':\n True, 'opacity': 30, 'zOrder': 1}, self.statusSub, {'video':\n [{'sink': 'MAIN', 'opacity': 130, 'zOrder': 0}, {'sink':\n 'SUB0', 'opacity': 30, 'zOrder': 1}]})\n\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-5": "#!/usr/bin/python2\nimport unittest\nimport luna_utils as luna\nimport time\n\nAPI_URL = \"com.webos.service.videooutput/\"\n\nVERBOSE_LOG = True\nSUPPORT_REGISTER = False\n\nSINK_MAIN = \"MAIN\"\nSINK_SUB = \"SUB0\"\n\n#TODO(ekwang): If you connect SUB, HAL error occurs. Just test MAIN in the current state\n#SINK_LIST = [SINK_MAIN, SINK_SUB]\nSINK_LIST = [SINK_MAIN]\n\nPID1 = \"pipeline1\"\nPID2 = \"pipeline2\"\n\nPID_LIST = [PID1, PID2]\n\nINPUT_RECT = {'X':0, 'Y':0, 'W':1920, 'H':1080}\nOUTPUT_RECT = {'X':400, 'Y':400, 'W':1920, 'H':1080}\n\n#Choose source type VDEC or HDMI for test input\n#SOURCE_NAME = SOURCE_NAME\n#SOURCE_PORT = 0\nSOURCE_NAME = \"HDMI\"\nSOURCE_PORT = 3\n\nSOURCE_WIDTH = 1920\nSOURCE_HEIGHT = 1080\n\nSLEEP_TIME = 1\n\nclass TestVideoMethods(luna.TestBase):\n\n def vlog(self, message):\n if VERBOSE_LOG:\n print(message)\n\n def setUp(self):\n self.vlog(\"setUp\")\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"register \" + pid)\n luna.call(API_URL + \"register\", { \"context\": pid })\n\n self.statusSub = luna.subscribe(API_URL + \"getStatus\", {\"subscribe\":True})\n\n def tearDown(self):\n self.vlog(\"tearDown\")\n for sink in SINK_LIST:\n self.vlog(\"disconnect \" + sink)\n luna.call(API_URL + \"disconnect\", { \"sink\": sink })\n\n if SUPPORT_REGISTER:\n for pid in PID_LIST:\n self.vlog(\"unregister \" + pid)\n luna.call(API_URL + \"unregister\", { \"context\": pid })\n\n luna.cancelSubscribe(self.statusSub)\n\n def connect(self, sink, source, port, pid):\n self.vlog(\"connect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n { \"outputMode\": \"DISPLAY\", \"sink\": sink, \"source\": source, \"sourcePort\": port },\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"connectedSource\": source, \"connectedSourcePort\": port}]})\n\n def mute(self, sink, blank):\n self.vlog(\"- Mute\" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"blankVideo\",\n {\"sink\": 
sink, \"blank\": blank},\n self.statusSub,\n {\"video\":[{\"sink\": sink, \"muted\": blank}]})\n\n def disconnect(self, sink, pid):\n self.vlog(\"disconnect \" + sink)\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"disconnect\", { \"sink\": sink },\n self.statusSub,\n {\"video\": [{\"sink\": sink, \"connectedSource\": None}]})\n\n def testConnectDisconnect(self):\n print(\"[testConnectDisconnect]\")\n for source, ports in {\"VDEC\":[0,1], \"HDMI\":[0,1,2]}.iteritems():\n for port in ports:\n for sink in SINK_LIST:\n for i in range(3):\n self.connect(sink, source, port, \"\")\n self.disconnect(sink, \"\")\n\n def testDualConnect(self):\n print(\"[testDualConnect]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(API_URL + \"connect\",\n {\"outputMode\": \"DISPLAY\", \"sink\": SINK_SUB, \"source\": SOURCE_NAME, \"sourcePort\": SOURCE_PORT},\n self.statusSub,\n {\"video\": [{\"sink\": SINK_MAIN, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT},\n {\"sink\": SINK_SUB, \"connectedSource\": SOURCE_NAME, \"connectedSourcePort\": SOURCE_PORT}]})\n\n self.disconnect(SINK_MAIN, \"\")\n if len(SINK_LIST) > 1:\n self.disconnect(SINK_SUB, \"\")\n\n def testMute(self):\n print(\"[testMute]\")\n for sink in SINK_LIST:\n self.connect(sink, SOURCE_NAME, SOURCE_PORT, \"\")\n\n for blank in [False, True]:\n self.mute(sink, blank)\n\n #test different orders of display window and media data\n\n def testSetDisplayWindowAndVideoData(self):\n print(\"[testSetDisplayWindowAndVideoData]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], 
\"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":0,\n \"height\":0,\n \"frameRate\":0,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}, # no media data yet so can't determine appliedsourceInput yet\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": \"MAIN\",\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetVideoDataAndDisplayWindow(self):\n print(\"[testSetVideoDataAndDisplayWindow]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": 
\"MAIN\",\n \"fullScreen\": False,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":INPUT_RECT['X'], \"y\":INPUT_RECT['Y'], \"width\":INPUT_RECT['W'], \"height\":INPUT_RECT['H']},\n \"displayOutput\": {\"x\":OUTPUT_RECT['X'], \"y\":OUTPUT_RECT['Y'], \"width\":OUTPUT_RECT['W'], \"height\":OUTPUT_RECT['H']}\n }]})\n\n self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetFullscreen(self):\n print(\"[testSetFullscreen]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"setVideoData\",\n {\"sink\": SINK_MAIN,\n \"contentType\": \"media\",\n \"frameRate\":29.5,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"scanType\":\"progressive\",\n \"adaptive\": False},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": False,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":0, \"height\":0}\n }]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT}},\n self.statusSub,\n {\"video\":[{\"sink\": SINK_MAIN,\n \"fullScreen\": True,\n \"width\":SOURCE_WIDTH,\n \"height\":SOURCE_HEIGHT,\n \"frameRate\":29.5,\n \"sourceInput\": {\"x\":0, \"y\":0, \"width\":SOURCE_WIDTH, \"height\":SOURCE_HEIGHT},\n \"displayOutput\": {\"x\":0, \"y\":0, \"width\":3840, \"height\":2160}\n }]})\n\n 
self.mute(SINK_MAIN, False)\n time.sleep(SLEEP_TIME)\n\n def testSetCompositing(self):\n print(\"[testSetCompositing]\")\n self.connect(SINK_MAIN, SOURCE_NAME, SOURCE_PORT, \"\")\n if len(SINK_LIST) > 1:\n self.connect(SINK_SUB, SOURCE_NAME, SOURCE_PORT, \"\")\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setCompositing\",\n {\"composeOrder\": [{\"sink\":SINK_MAIN, \"opacity\":20, \"zOrder\":1},\n {\"sink\":SINK_SUB, \"opacity\":31, \"zOrder\":0}]},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":20, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_MAIN, \"fullScreen\":True, \"opacity\":130},\n self.statusSub, {\"video\":[{\"sink\": SINK_MAIN, \"opacity\":130, \"zOrder\":1}]})\n\n if len(SINK_LIST) > 1:\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":200},\n self.statusSub, {\"video\":[{\"sink\": \"SUB0\", \"opacity\":200, \"zOrder\":0}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":230},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":230, \"zOrder\":1}]})\n\n self.checkLunaCallSuccessAndSubscriptionUpdate(\n API_URL + \"display/setDisplayWindow\",\n {\"sink\": SINK_SUB, \"fullScreen\":True, \"opacity\":30, \"zOrder\": 1},\n self.statusSub, {\"video\":[{\"sink\": \"MAIN\", \"opacity\":130, \"zOrder\":0}, {\"sink\": \"SUB0\", \"opacity\":30, \"zOrder\":1}]})\n\nif __name__ == '__main__':\n luna.VERBOSE = False\n unittest.main()\n",
"step-ids": [
11,
14,
15,
17,
18
]
}
|
[
11,
14,
15,
17,
18
] |
#!/usr/bin/env python
import unittest
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io.mmtfReader import download_mmtf_files
from mmtfPyspark.datasets import secondaryStructureExtractor
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.mappers import StructureToPolymerChains
class SecondaryStructureExtractorTest(unittest.TestCase):
def setUp(self):
conf = SparkConf().setMaster("local[*]").setAppName('secondaryStructureExtractorTest')
self.sc = SparkContext(conf=conf)
pdbIds = ["1STP","4HHB"]
self.pdb = download_mmtf_files(pdbIds,self.sc)
def test1(self):
pdb = self.pdb.filter(ContainsLProteinChain()) \
.flatMap(StructureToPolymerChains()) \
.filter(ContainsLProteinChain())
seq = secondaryStructureExtractor.get_dataset(pdb)
self.assertTrue(seq.count() == 5)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "480e6ae9eee70b2da58ca5624a43d8f5dcae1d33",
"index": 1207,
"step-1": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n <mask token>\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom pyspark import SparkConf, SparkContext\nfrom mmtfPyspark.io.mmtfReader import download_mmtf_files\nfrom mmtfPyspark.datasets import secondaryStructureExtractor\nfrom mmtfPyspark.filters import ContainsLProteinChain\nfrom mmtfPyspark.mappers import StructureToPolymerChains\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster('local[*]').setAppName(\n 'secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n pdbIds = ['1STP', '4HHB']\n self.pdb = download_mmtf_files(pdbIds, self.sc)\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()).flatMap(\n StructureToPolymerChains()).filter(ContainsLProteinChain())\n seq = secondaryStructureExtractor.get_dataset(pdb)\n self.assertTrue(seq.count() == 5)\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python\n\nimport unittest\nfrom pyspark import SparkConf, SparkContext\nfrom mmtfPyspark.io.mmtfReader import download_mmtf_files\nfrom mmtfPyspark.datasets import secondaryStructureExtractor\nfrom mmtfPyspark.filters import ContainsLProteinChain\nfrom mmtfPyspark.mappers import StructureToPolymerChains\n\n\nclass SecondaryStructureExtractorTest(unittest.TestCase):\n\n def setUp(self):\n conf = SparkConf().setMaster(\"local[*]\").setAppName('secondaryStructureExtractorTest')\n self.sc = SparkContext(conf=conf)\n\n pdbIds = [\"1STP\",\"4HHB\"]\n self.pdb = download_mmtf_files(pdbIds,self.sc)\n\n\n def test1(self):\n pdb = self.pdb.filter(ContainsLProteinChain()) \\\n .flatMap(StructureToPolymerChains()) \\\n .filter(ContainsLProteinChain())\n\n seq = secondaryStructureExtractor.get_dataset(pdb)\n\n self.assertTrue(seq.count() == 5)\n\n\n def tearDown(self):\n self.sc.stop()\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from floppy.node import Node, Input, Output, Tag, abstractNode
@abstractNode
class StringNode(Node):
Tag('StringOperations')
class StringAppend(StringNode):
"""
Creates a new node which combines two strings. These can be seperated by a delimiter.
:param nodeClass: subclass object of 'Node'.
:return: newly created Node instance.
"""
Input('First', str)
Input('Second', str)
Input('Delimiter', str, optional=True, default='')
Output('Joined', str)
def run(self):
super(StringAppend, self).run()
self._Joined(self._Delimiter.join([self._First, self._Second]))
class ListToString(StringNode):
"""
Creates a new node which combines two strings. These can be seperated by a delimiter.
:param nodeClass: subclass object of 'Node'.
:return: newly created Node instance.
"""
Input('List', object, list=True)
Input('Delimiter', str, optional=True, default='')
Output('Joined', str)
def run(self):
super(ListToString, self).run()
string = []
for element in self._List:
string.append(str(element))
self._Joined(self._Delimiter.join(string))
|
normal
|
{
"blob_id": "1bb151171bbbb899456324056be3634e87b5c8fb",
"index": 3494,
"step-1": "<mask token>\n\n\nclass StringAppend(StringNode):\n <mask token>\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n <mask token>\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-2": "<mask token>\n\n\nclass StringAppend(StringNode):\n <mask token>\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-3": "<mask token>\n\n\nclass StringAppend(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-4": "from floppy.node import Node, Input, Output, Tag, abstractNode\n\n\n@abstractNode\nclass StringNode(Node):\n Tag('StringOperations')\n\n\nclass StringAppend(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-5": null,
"step-ids": [
4,
5,
6,
8
]
}
|
[
4,
5,
6,
8
] |
import urllib, json
from PyQt4.QtCore import QRectF, Qt
from PyQt4.Qt import QPrinter, QPainter, QFont, QBrush, QColor, QPen, QImage
from PyQt4.QtGui import QApplication
# bkgimg = QImage()
# bkgimg.load("KosyMost.jpg", format = "jpg")
#
# print bkgimg
# exit()
def background(painter, bkgimg):
maxx = painter.device().width()
maxy = painter.device().height()
rimg = QRectF(0,0,maxx,maxy*.9)
#
painter.fillRect(0,0,maxx, maxy, QBrush(Qt.red, Qt.SolidPattern))
painter.drawImage(rimg, bkgimg)
wwh = QColor(255,255,255,128)
painter.fillRect(0,2*maxy/10,maxx, 4*maxy/10, QBrush(wwh, Qt.SolidPattern))
u = QRectF(0,9*maxy/10,maxx,maxy/10)
penHText = QPen(Qt.white);
painter.setPen(penHText);
painter.setFont(QFont("Arial", 16, italic=True));
painter.drawText(u, Qt.AlignLeft | Qt.TextIncludeTrailingSpaces | Qt.AlignVCenter , " ekskursja.pl/flashcards")
# painter.drawLine(0,0,maxx,maxy)
# painter.drawLine(0,maxy,maxx,0)
# proxies = {'http': 'http://126.179.0.206:9090' }
headers = {'User-Agent':'MultiFlashcards/fcset.py 0.1'}
url = 'http://ekskursja.pl/wp-content/plugins/flashcards/flashcards.json.php?name=contigo&id=29072'
print url
# response = urllib.urlopen(url, proxies=proxies)
response = urllib.urlopen(url)
data = json.loads(response.read())
app = QApplication([])
printer = QPrinter(QPrinter.HighResolution);
printer.setOutputFormat(QPrinter.PdfFormat);
printer.setPageSize(QPrinter.A6);
printer.setOrientation(QPrinter.Landscape);
printer.setPageMargins (0,0,0,0, QPrinter.Millimeter);
printer.setFullPage(False);
bkgimg = QImage()
if not bkgimg.load("KosyMost.png", format = "png"):
print "Not loaded"
printer.setOutputFileName("contigo.pdf");
painter = QPainter(printer)
maxx = painter.device().width()
maxy = painter.device().height()
print "Wymiary: %d,%d" % (maxx, maxy)
q = QRectF(0,2*maxy/10,maxx,2*maxy/10)
a = QRectF(0,4*maxy/10,maxx,2*maxy/10)
penHText = QPen(QColor("#c60b1e"));
for qa in data['flashcards']:
print "%s -> %s" % (qa['q'], qa['a'][0])
# painter.drawText(painter.device().width()/2, 500, qa['q'])
background(painter, bkgimg)
painter.setPen(penHText);
painter.setFont(QFont("Arial", 24, QFont.Bold));
painter.drawText(q, Qt.AlignCenter, qa['q'])
printer.newPage()
background(painter, bkgimg)
painter.setPen(penHText);
painter.setFont(QFont("Arial", 24, QFont.Bold));
painter.drawText(q, Qt.AlignCenter | Qt.TextWordWrap, qa['q'])
painter.drawText(a, Qt.AlignCenter | Qt.TextWordWrap, qa['a'][0])
printer.newPage()
painter.end()
|
normal
|
{
"blob_id": "47587cce572807922344523d8c5fefb09552fe34",
"index": 8638,
"step-1": "import urllib, json\nfrom PyQt4.QtCore import QRectF, Qt\nfrom PyQt4.Qt import QPrinter, QPainter, QFont, QBrush, QColor, QPen, QImage\nfrom PyQt4.QtGui import QApplication\n\n\n# bkgimg = QImage()\n# bkgimg.load(\"KosyMost.jpg\", format = \"jpg\")\n# \n# print bkgimg\n# exit()\n\ndef background(painter, bkgimg):\n maxx = painter.device().width()\n maxy = painter.device().height()\n \n \n rimg = QRectF(0,0,maxx,maxy*.9)\n# \n painter.fillRect(0,0,maxx, maxy, QBrush(Qt.red, Qt.SolidPattern))\n painter.drawImage(rimg, bkgimg)\n \n wwh = QColor(255,255,255,128)\n \n painter.fillRect(0,2*maxy/10,maxx, 4*maxy/10, QBrush(wwh, Qt.SolidPattern))\n \n u = QRectF(0,9*maxy/10,maxx,maxy/10)\n penHText = QPen(Qt.white);\n painter.setPen(penHText);\n painter.setFont(QFont(\"Arial\", 16, italic=True));\n painter.drawText(u, Qt.AlignLeft | Qt.TextIncludeTrailingSpaces | Qt.AlignVCenter , \" ekskursja.pl/flashcards\")\n \n\n \n\n# painter.drawLine(0,0,maxx,maxy)\n# painter.drawLine(0,maxy,maxx,0)\n \n# proxies = {'http': 'http://126.179.0.206:9090' }\nheaders = {'User-Agent':'MultiFlashcards/fcset.py 0.1'}\n\nurl = 'http://ekskursja.pl/wp-content/plugins/flashcards/flashcards.json.php?name=contigo&id=29072'\n\nprint url\n\n# response = urllib.urlopen(url, proxies=proxies)\nresponse = urllib.urlopen(url)\n\ndata = json.loads(response.read())\n\n\napp = QApplication([])\n\nprinter = QPrinter(QPrinter.HighResolution);\nprinter.setOutputFormat(QPrinter.PdfFormat);\nprinter.setPageSize(QPrinter.A6);\nprinter.setOrientation(QPrinter.Landscape);\nprinter.setPageMargins (0,0,0,0, QPrinter.Millimeter);\nprinter.setFullPage(False);\n\nbkgimg = QImage()\nif not bkgimg.load(\"KosyMost.png\", format = \"png\"):\n print \"Not loaded\"\n\n\nprinter.setOutputFileName(\"contigo.pdf\");\n\n\n\npainter = QPainter(printer)\n\n\nmaxx = painter.device().width()\nmaxy = painter.device().height()\n\nprint \"Wymiary: %d,%d\" % (maxx, maxy)\n\nq = QRectF(0,2*maxy/10,maxx,2*maxy/10)\na = 
QRectF(0,4*maxy/10,maxx,2*maxy/10)\n\n\n\n\npenHText = QPen(QColor(\"#c60b1e\"));\n\n\nfor qa in data['flashcards']:\n print \"%s -> %s\" % (qa['q'], qa['a'][0])\n# painter.drawText(painter.device().width()/2, 500, qa['q'])\n background(painter, bkgimg)\n painter.setPen(penHText);\n painter.setFont(QFont(\"Arial\", 24, QFont.Bold));\n painter.drawText(q, Qt.AlignCenter, qa['q'])\n printer.newPage()\n \n background(painter, bkgimg)\n painter.setPen(penHText);\n painter.setFont(QFont(\"Arial\", 24, QFont.Bold));\n painter.drawText(q, Qt.AlignCenter | Qt.TextWordWrap, qa['q'])\n painter.drawText(a, Qt.AlignCenter | Qt.TextWordWrap, qa['a'][0])\n printer.newPage()\n \npainter.end()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' /
'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' /
'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' /
'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' /
'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
'path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
)
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
'path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
)
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_rewardVolume(self):
rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_stimOnTrigger_times(self):
sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_response_times(self):
rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
<|reserved_special_token_0|>
def test_get_goCueOnset_times(self):
gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_included_trials(self):
it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(self.training_lt5['path'], settings
={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
str(ex.exception))
out, files = training_trials.extract_all(self.training_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
) as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = {}, []
out, files = biased_trials.extract_all(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
out, files = biased_trials.extract_all(self.biased_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
path = self.training_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
path = self.biased_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
path = self.biased_ge5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
<|reserved_special_token_0|>
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
drift_pol = np.array([11 * 1e-06, -20])
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + 0.001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) ->None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
'some_file.npy')
var_names = 'csv', 'ssv', 'tsv', 'npy'
def _extract(self, **kwargs) ->tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) ->None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
ts += np.full_like(ts, 0.0001).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.0
audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
'polarities']}
for p in range(n_pulses):
i = p * 2
rise = pulse_width * p + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
self.assertEqual(audio, audio_, "Audio dict shouldn't be effected")
np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
40.033333, 40.05])
delay = 0.08
pulse_width = 1e-05
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t +
pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
=0.005)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
tolerance=0.1, min_diff=0.005)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))
def test_attribute_times(self, display=False):
tsa = np.linspace(0, 60, 60 * 4)[:60]
tsb = np.linspace(0, 60, 60 * 3)[:45]
tsa = np.sort(np.append(tsa, 0.4))
tsb = np.sort(np.append(tsb, 0.41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
matches = camera.attribute_times(tsa, tsb)
expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
matches = camera.attribute_times(tsa, tsb, injective=False, take=
'nearest')
expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' /
'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' /
'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' /
'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' /
'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
'path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
)
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
'path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
)
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
<|reserved_special_token_0|>
def test_get_contrastLR(self):
cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(
)[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]
self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
<|reserved_special_token_0|>
def test_get_choice(self):
choice = training_trials.Choice(session_path=self.training_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = training_trials.Choice(session_path=self.training_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.training_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_lt5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_lt5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
choice = biased_trials.Choice(session_path=self.biased_ge5['path']
).extract(save=False)[0]
self.assertTrue(isinstance(choice, np.ndarray))
data = raw.load_data(self.biased_ge5['path'])
trial_nogo = np.array([(~np.isnan(t['behavior_data'][
'States timestamps']['no_go'][0][0])) for t in data])
if any(trial_nogo):
self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
def test_get_rewardVolume(self):
rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(rv, np.ndarray))
rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rv, np.ndarray))
self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
def test_get_feedback_times_ge5(self):
ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_feedback_times_lt5(self):
ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(ft, np.ndarray))
ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(ft, np.ndarray))
def test_get_stimOnTrigger_times(self):
sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
).extract()[0]
self.assertTrue(isinstance(sott, np.ndarray))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_intervals(self):
di = training_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = training_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
def test_get_response_times(self):
rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(rt, np.ndarray))
<|reserved_special_token_0|>
def test_get_goCueOnset_times(self):
gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertTrue(np.all(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 12)
gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 4)
gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(gcot, np.ndarray))
self.assertFalse(np.any(np.isnan(gcot)))
self.assertTrue(gcot.size != 0 or gcot.size == 8)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_included_trials(self):
it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
)[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
self.assertTrue(isinstance(it, np.ndarray))
it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
self.assertTrue(isinstance(it, np.ndarray))
@wheelMoves_fixture
def test_extract_all(self):
with self.assertRaises(ValueError) as ex:
training_trials.extract_all(self.training_lt5['path'], settings
={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
str(ex.exception))
out, files = training_trials.extract_all(self.training_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
) as Wheel:
Wheel.var_names = tuple()
Wheel().extract.return_value = {}, []
out, files = biased_trials.extract_all(self.biased_lt5['path'],
settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
self.assertEqual(15, len(out))
self.assertTrue(all(map(Path.exists, files)))
out, files = biased_trials.extract_all(self.biased_ge5['path'],
save=True)
self.assertEqual(19, len(out))
self.assertTrue(all(map(Path.exists, files)))
def test_encoder_positions_clock_reset(self):
path = self.training_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
1853979, 1859144])
self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
def test_encoder_positions_clock_errors(self):
path = self.biased_lt5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_lt5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
path = self.biased_ge5['path'] / 'raw_behavior_data'
path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
dy = raw._load_encoder_positions_file_ge5(path)
self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
def test_wheel_folders(self):
for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_lt5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
):
df = raw._load_encoder_positions_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
df = raw._load_encoder_events_file_ge5(wf)
self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestSyncWheelBpod(unittest.TestCase):
def test_sync_bpod_bonsai_poor_quality_timestamps(self):
sync_trials_robust = raw.sync_trials_robust
drift_pol = np.array([11 * 1e-06, -20])
np.random.seed(seed=784)
t0_full = np.cumsum(np.random.rand(50)) + 0.001
t1_full = np.polyval(drift_pol, t0_full) + t0_full
t0 = t0_full.copy()
t1 = t1_full.copy()
t0_, t1_ = sync_trials_robust(t0, t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[:-1])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, t1[1:])
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[1:], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0[:-1], t1)
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
def setUp(self) ->None:
self.main_path = Path(__file__).parent
def test_encoder_events_corrupt(self):
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_lt5(file_events)
self.assertTrue(dy.size > 6)
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
dy = raw._load_encoder_events_file_ge5(file_events)
self.assertTrue(dy.size > 6)
def test_encoder_positions_corrupts(self):
path = self.main_path.joinpath('data', 'wheel', 'ge5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_ge5(file_position)
self.assertTrue(dy.size > 18)
path = self.main_path.joinpath('data', 'wheel', 'lt5')
for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
dy = raw._load_encoder_positions_file_lt5(file_position)
self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
'some_file.npy')
var_names = 'csv', 'ssv', 'tsv', 'npy'
def _extract(self, **kwargs) ->tuple:
csv = pd.DataFrame([1, 2, 3])
ssv = pd.DataFrame([1, 2, 3])
tsv = pd.DataFrame([1, 2, 3])
npy = np.array([1, 2, 3])
return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
def setUp(self) ->None:
self.tempdir = tempfile.TemporaryDirectory()
self.session_path = self.tempdir.name
self.mock_extractor = MockExtracor(self.session_path)
def test_saving_method(self):
data, paths = self.mock_extractor.extract(save=True)
self.assertTrue(all([x.exists() for x in paths]))
def tearDown(self):
self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
def test_groom_pin_state(self):
fps = 60
t_offset = 39.4
ts = np.arange(0, 10, 1 / fps) + t_offset
ts += np.full_like(ts, 0.0001).cumsum()
n_pulses = 2
pulse_width = 0.3
duty = 0.5
gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
gpio['polarities'][1::2] = -1
aud_offset = 40.0
audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
'polarities']}
for p in range(n_pulses):
i = p * 2
rise = pulse_width * p + duty * p + 1
audio['times'][i] = aud_offset + rise
audio['times'][i + 1] = audio['times'][i] + pulse_width
rise += t_offset
gpio['indices'][i] = np.where(ts > rise)[0][0]
gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
self.assertEqual(audio, audio_, "Audio dict shouldn't be effected")
np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
40.033333, 40.05])
delay = 0.08
pulse_width = 1e-05
t = audio['times'][0] + delay
audio['times'] = np.sort(np.append(audio['times'], [t, t +
pulse_width, 80]))
audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
audio['polarities'][1::2] = -1
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
=0.005)
self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
audio['times'][4] -= 0.3
gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
tolerance=0.1, min_diff=0.005)
self.assertTrue(np.all(gpio_['times'] == audio_['times']))
self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))
def test_attribute_times(self, display=False):
tsa = np.linspace(0, 60, 60 * 4)[:60]
tsb = np.linspace(0, 60, 60 * 3)[:45]
tsa = np.sort(np.append(tsa, 0.4))
tsb = np.sort(np.append(tsb, 0.41))
if display:
from ibllib.plots import vertical_lines
import matplotlib.pyplot as plt
vertical_lines(tsb, linestyle=':', color='r', label='tsb')
vertical_lines(tsa, linestyle=':', color='b', label='tsa')
plt.legend()
matches = camera.attribute_times(tsa, tsb)
expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
self.assertEqual(matches.size, tsb.size)
matches = camera.attribute_times(tsa, tsb, take='nearest')
expected[np.r_[1:3]] = expected[1:3] + 1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, take='after')
missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
expected[missing] = -1
np.testing.assert_array_equal(matches, expected)
matches = camera.attribute_times(tsa, tsb, tol=0.05)
expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
49, 53, 57])
np.testing.assert_array_equal(matches[matches > -1], expected)
matches = camera.attribute_times(tsa, tsb, injective=False, take=
'nearest')
expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
np.testing.assert_array_equal(matches, expected)
with self.assertRaises(ValueError):
camera.attribute_times(tsa, tsb, injective=False, take='closest')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestExtractTrialData(unittest.TestCase):
def setUp(self):
self.main_path = Path(__file__).parent
self.training_lt5 = {'path': self.main_path / 'data' /
'session_training_lt5'}
self.biased_lt5 = {'path': self.main_path / 'data' /
'session_biased_lt5'}
self.training_ge5 = {'path': self.main_path / 'data' /
'session_training_ge5'}
self.biased_ge5 = {'path': self.main_path / 'data' /
'session_biased_ge5'}
self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
'path']))
self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
)
self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
'path']))
self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
)
self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
def test_get_feedbackType(self):
ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[
0]
self.assertEqual(ft.size, self.training_lt5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[
0]
self.assertEqual(ft.size, self.training_ge5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_lt5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]
self.assertEqual(ft.size, self.biased_ge5['ntrials'])
self.assertFalse(ft[ft == 0].size > 0)
    def test_get_contrastLR(self):
        """Contrasts are non-negative and exactly one side is NaN per trial."""
        cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        # one and only one of (cl, cr) is NaN on each trial
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
<|reserved_special_token_0|>
    def test_get_choice(self):
        """Choice is an ndarray and equals 0 on no-go trials."""
        choice = training_trials.Choice(session_path=self.training_lt5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_lt5['path'])
        # mask of trials whose 'no_go' state was actually entered
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = training_trials.Choice(session_path=self.training_ge5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_ge5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = biased_trials.Choice(session_path=self.biased_lt5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_lt5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = biased_trials.Choice(session_path=self.biased_ge5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_ge5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
    def test_get_rewardVolume(self):
        """rewardVolume is an ndarray; biased sessions deliver a single volume."""
        rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
            0]
        self.assertTrue(isinstance(rv, np.ndarray))
        rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
            0]
        self.assertTrue(isinstance(rv, np.ndarray))
        rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # every non-zero reward equals the maximum, i.e. a fixed volume
        self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
        rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
    def test_get_feedback_times_ge5(self):
        """Feedback times extract to an ndarray for ge5 sessions."""
        ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
    def test_get_feedback_times_lt5(self):
        """Feedback times extract to an ndarray for lt5 sessions."""
        ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
    def test_get_stimOnTrigger_times(self):
        """Stimulus-on trigger times extract to an ndarray for all session types."""
        sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_get_intervals(self):
di = training_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = training_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
    def test_get_response_times(self):
        """Response times extract to an ndarray for all session types."""
        rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
<|reserved_special_token_0|>
    def test_get_goCueOnset_times(self):
        """Go-cue onset times are ndarrays; the lt5 training fixture has none."""
        gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
            0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        # no audio sync in the lt5 training fixture -> all NaN
        self.assertTrue(np.all(np.isnan(gcot)))
        # NOTE(review): `size != 0 or size == N` is true for any non-empty
        # array; presumably meant to pin the fixture size — confirm intent.
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
            0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 12)
        gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 8)
    def test_get_included_trials_lt5(self):
        """Included-trials mask extracts to an ndarray for lt5 sessions."""
        it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    def test_get_included_trials_ge5(self):
        """Included-trials mask extracts to an ndarray for ge5 sessions."""
        it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    def test_get_included_trials(self):
        """Included-trials mask extracts when the rig version is given explicitly."""
        # lt5 sessions need an explicit version tag in the settings
        it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
            settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
            settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    @wheelMoves_fixture
    def test_extract_all(self):
        """extract_all returns the expected output count and saves every file."""
        # the fixture supplies an empty wheelMoves file, so lt5 training
        # extraction must fail with an informative error
        with self.assertRaises(ValueError) as ex:
            training_trials.extract_all(self.training_lt5['path'], settings
                ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
        self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
            str(ex.exception))
        out, files = training_trials.extract_all(self.training_ge5['path'],
            save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))
        # stub out the wheel extractor so biased lt5/ge5 extraction can run
        with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
            ) as Wheel:
            Wheel.var_names = tuple()
            Wheel().extract.return_value = {}, []
            out, files = biased_trials.extract_all(self.biased_lt5['path'],
                settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
            self.assertEqual(15, len(out))
            self.assertTrue(all(map(Path.exists, files)))
            out, files = biased_trials.extract_all(self.biased_ge5['path'],
                save=True)
            self.assertEqual(19, len(out))
            self.assertTrue(all(map(Path.exists, files)))
    def test_encoder_positions_clock_reset(self):
        """Loader makes rotary-encoder timestamps monotonic across a 2**32 wrap."""
        path = self.training_lt5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        # raw (pre-correction) timestamps of the samples after the wrap
        dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
            1853979, 1859144])
        self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
        # the post-wrap samples were shifted up by exactly 2**32
        self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
    def test_encoder_positions_clock_errors(self):
        """Corrupt encoder timestamps are repaired into a monotonic series."""
        path = self.biased_lt5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
        path = self.biased_ge5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_ge5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
    def test_wheel_folders(self):
        """Every wheel fixture file loads with strictly increasing timestamps."""
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
            ):
            df = raw._load_encoder_positions_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
            ):
            df = raw._load_encoder_positions_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
    def test_load_encoder_positions(self):
        """Positions load for all fixtures; lt5 needs an explicit version tag."""
        raw.load_encoder_positions(self.training_lt5['path'], settings={
            'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.training_ge5['path'])
        raw.load_encoder_positions(self.biased_lt5['path'], settings={
            'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.biased_ge5['path'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TestSyncWheelBpod(unittest.TestCase):
    def test_sync_bpod_bonsai_poor_quality_timestamps(self):
        """sync_trials_robust recovers the drift mapping despite missing events."""
        sync = raw.sync_trials_robust
        drift = np.array([11 * 1e-06, -20])
        np.random.seed(seed=784)
        # synthetic trial times on two clocks related by a linear drift
        ta_full = np.cumsum(np.random.rand(50)) + 0.001
        tb_full = np.polyval(drift, ta_full) + ta_full
        ta = ta_full.copy()
        tb = tb_full.copy()

        def check(a, b):
            # after matching, the recovered pairs must obey the drift model
            a_, b_ = sync(a, b)
            self.assertTrue(np.allclose(b_, np.polyval(drift, a_) + a_))

        check(ta, tb)                                # perfect correspondence
        check(ta, tb[:-1])                           # event missing at end of b
        check(ta, tb[1:])                            # event missing at start of b
        check(ta[1:], tb)                            # event missing at start of a
        check(ta[:-1], tb)                           # event missing at end of a
        check(ta, np.delete(tb, 24))                 # event missing mid-b
        check(np.delete(ta, 12), np.delete(tb, 24))  # events missing in both
class TestWheelLoaders(unittest.TestCase):
    """Check that corrupt encoder fixture files still parse into usable frames."""

    def setUp(self) -> None:
        self.main_path = Path(__file__).parent

    def _check_loader(self, version, loader, pattern, min_size):
        """Run *loader* on every *pattern* file under data/wheel/<version>."""
        data_dir = self.main_path.joinpath('data', 'wheel', version)
        for raw_file in data_dir.rglob(pattern):
            frame = loader(raw_file)
            self.assertTrue(frame.size > min_size)

    def test_encoder_events_corrupt(self):
        self._check_loader('lt5', raw._load_encoder_events_file_lt5,
                           '_iblrig_encoderEvents.raw.*', 6)
        self._check_loader('ge5', raw._load_encoder_events_file_ge5,
                           '_iblrig_encoderEvents.raw.*', 6)

    def test_encoder_positions_corrupts(self):
        self._check_loader('ge5', raw._load_encoder_positions_file_ge5,
                           '_iblrig_encoderPositions.raw.*', 18)
        self._check_loader('lt5', raw._load_encoder_positions_file_lt5,
                           '_iblrig_encoderPositions.raw.*', 18)
class MockExtracor(BaseExtractor):
    """Minimal extractor that emits one trivial output per supported format."""
    # NOTE(review): these names pair positionally with var_names below, but
    # the ordering differs ('tsv'/'ssv' are swapped between the two tuples),
    # so the 'ssv' variable lands in 'some_file.tsv' and vice versa.  Harmless
    # here because all four outputs hold identical data — confirm if reused.
    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
        'some_file.npy')
    var_names = 'csv', 'ssv', 'tsv', 'npy'

    def _extract(self, **kwargs) ->tuple:
        """Return one small table per tabular format plus a raw numpy array."""
        csv = pd.DataFrame([1, 2, 3])
        ssv = pd.DataFrame([1, 2, 3])
        tsv = pd.DataFrame([1, 2, 3])
        npy = np.array([1, 2, 3])
        return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
    """Verify that BaseExtractor.extract(save=True) writes every declared file."""

    def setUp(self) -> None:
        self.tempdir = tempfile.TemporaryDirectory()
        self.session_path = self.tempdir.name
        self.mock_extractor = MockExtracor(self.session_path)

    def test_saving_method(self):
        _, paths = self.mock_extractor.extract(save=True)
        self.assertTrue(all(p.exists() for p in paths))

    def tearDown(self):
        self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
    """Unit tests for the camera sync helpers in ibllib.io.extractors.camera."""

    def test_groom_pin_state(self):
        """groom_pin_state aligns GPIO pin flips to audio TTLs and trims noise."""
        fps = 60
        t_offset = 39.4
        # camera frame times with a small cumulative clock drift
        ts = np.arange(0, 10, 1 / fps) + t_offset
        ts += np.full_like(ts, 0.0001).cumsum()
        n_pulses = 2
        pulse_width = 0.3
        duty = 0.5
        # GPIO flips stored as frame indices with alternating polarity
        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
        gpio['polarities'][1::2] = -1
        aud_offset = 40.0
        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
            'polarities']}
        # build matching audio pulse times and the GPIO frames that see them
        for p in range(n_pulses):
            i = p * 2
            rise = pulse_width * p + duty * p + 1
            audio['times'][i] = aud_offset + rise
            audio['times'][i + 1] = audio['times'][i] + pulse_width
            rise += t_offset
            gpio['indices'][i] = np.where(ts > rise)[0][0]
            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, "Audio dict shouldn't be effected")
        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
            40.033333, 40.05])
        # add a spurious, very short TTL plus one out-of-range event
        delay = 0.08
        pulse_width = 1e-05
        t = audio['times'][0] + delay
        audio['times'] = np.sort(np.append(audio['times'], [t, t +
            pulse_width, 80]))
        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
        audio['polarities'][1::2] = -1
        # min_diff should filter out the short TTL; sizes must match again
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
            =0.005)
        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
        # shift one event outside the tolerance; the pair should be dropped
        audio['times'][4] -= 0.3
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
            tolerance=0.1, min_diff=0.005)
        self.assertTrue(np.all(gpio_['times'] == audio_['times']))
        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))

    def test_attribute_times(self, display=False):
        """attribute_times matches event trains under each 'take' strategy."""
        # two event trains at different rates with one near-coincident pair
        tsa = np.linspace(0, 60, 60 * 4)[:60]
        tsb = np.linspace(0, 60, 60 * 3)[:45]
        tsa = np.sort(np.append(tsa, 0.4))
        tsb = np.sort(np.append(tsb, 0.41))
        if display:
            from ibllib.plots import vertical_lines
            import matplotlib.pyplot as plt
            vertical_lines(tsb, linestyle=':', color='r', label='tsb')
            vertical_lines(tsa, linestyle=':', color='b', label='tsa')
            plt.legend()
        # default ('first') assignment; -1 marks unmatched events
        matches = camera.attribute_times(tsa, tsb)
        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
        np.testing.assert_array_equal(matches, expected)
        self.assertEqual(matches.size, tsb.size)
        matches = camera.attribute_times(tsa, tsb, take='nearest')
        expected[np.r_[1:3]] = expected[1:3] + 1
        np.testing.assert_array_equal(matches, expected)
        matches = camera.attribute_times(tsa, tsb, take='after')
        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
        expected[missing] = -1
        np.testing.assert_array_equal(matches, expected)
        # a tight tolerance keeps only the close matches
        matches = camera.attribute_times(tsa, tsb, tol=0.05)
        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
            49, 53, 57])
        np.testing.assert_array_equal(matches[matches > -1], expected)
        # non-injective mode may assign the same source event twice
        matches = camera.attribute_times(tsa, tsb, injective=False, take=
            'nearest')
        expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
        np.testing.assert_array_equal(matches, expected)
        # 'closest' is not a valid take strategy
        with self.assertRaises(ValueError):
            camera.attribute_times(tsa, tsb, injective=False, take='closest')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestExtractTrialData(unittest.TestCase):
    def setUp(self):
        """Locate the four fixture sessions and record their raw trial counts."""
        self.main_path = Path(__file__).parent
        self.training_lt5 = {'path': self.main_path / 'data' /
            'session_training_lt5'}
        self.biased_lt5 = {'path': self.main_path / 'data' /
            'session_biased_lt5'}
        self.training_ge5 = {'path': self.main_path / 'data' /
            'session_training_ge5'}
        self.biased_ge5 = {'path': self.main_path / 'data' /
            'session_biased_ge5'}
        # number of trials in each raw Bpod data file, used for size checks
        self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[
            'path']))
        self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])
            )
        self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[
            'path']))
        self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])
            )
        self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
        self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
    def test_get_feedbackType(self):
        """feedbackType has one entry per trial and contains no zeros."""
        ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[
            0]
        self.assertEqual(ft.size, self.training_lt5['ntrials'])
        self.assertFalse(ft[ft == 0].size > 0)
        ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[
            0]
        self.assertEqual(ft.size, self.training_ge5['ntrials'])
        self.assertFalse(ft[ft == 0].size > 0)
        ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]
        self.assertEqual(ft.size, self.biased_lt5['ntrials'])
        self.assertFalse(ft[ft == 0].size > 0)
        ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]
        self.assertEqual(ft.size, self.biased_ge5['ntrials'])
        self.assertFalse(ft[ft == 0].size > 0)
    def test_get_contrastLR(self):
        """Contrasts are non-negative and exactly one side is NaN per trial."""
        cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        # one and only one of (cl, cr) is NaN on each trial
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]
        self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
    def test_get_probabilityLeft(self):
        """probabilityLeft values all come from the session's block-probability set."""
        pl = training_trials.ProbabilityLeft(self.training_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        pl = training_trials.ProbabilityLeft(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0
            ]
        self.assertTrue(isinstance(pl, np.ndarray))
        md = raw.load_settings(self.biased_lt5['path'])
        if md:
            probs = md['BLOCK_PROBABILITY_SET']
            # 0.5 is the unbiased first block and is always a legal value
            probs.append(0.5)
            self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))
        pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0
            ]
        self.assertTrue(isinstance(pl, np.ndarray))
        md = raw.load_settings(self.biased_ge5['path'])
        probs = md['BLOCK_PROBABILITY_SET']
        probs.append(0.5)
        self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))
    def test_get_choice(self):
        """Choice is an ndarray and equals 0 on no-go trials."""
        choice = training_trials.Choice(session_path=self.training_lt5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_lt5['path'])
        # mask of trials whose 'no_go' state was actually entered
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = training_trials.Choice(session_path=self.training_ge5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_ge5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = biased_trials.Choice(session_path=self.biased_lt5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_lt5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        choice = biased_trials.Choice(session_path=self.biased_ge5['path']
            ).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_ge5['path'])
        trial_nogo = np.array([(~np.isnan(t['behavior_data'][
            'States timestamps']['no_go'][0][0])) for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
def test_get_repNum(self):
rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(3):
self.assertTrue(i in rn)
rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(rn, np.ndarray))
for i in range(4):
self.assertTrue(i in rn)
    def test_get_rewardVolume(self):
        """rewardVolume is an ndarray; biased sessions deliver a single volume."""
        rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[
            0]
        self.assertTrue(isinstance(rv, np.ndarray))
        rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[
            0]
        self.assertTrue(isinstance(rv, np.ndarray))
        rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # every non-zero reward equals the maximum, i.e. a fixed volume
        self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
        rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))
    def test_get_feedback_times_ge5(self):
        """Feedback times extract to an ndarray for ge5 sessions."""
        ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
    def test_get_feedback_times_lt5(self):
        """Feedback times extract to an ndarray for lt5 sessions."""
        ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
    def test_get_stimOnTrigger_times(self):
        """Stimulus-on trigger times extract to an ndarray for all session types."""
        sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
    def test_get_stimOn_times_lt5(self):
        """Deprecated stimOn extractor still returns an ndarray for lt5 sessions."""
        st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
        st = biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
    def test_get_stimOn_times_ge5(self):
        """Deprecated stimOn extractor still returns an ndarray for ge5 sessions."""
        st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
        st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
    def test_stimOnOffFreeze_times(self):
        """Stim on/off/freeze extractor returns ndarrays for every session type."""
        st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
def test_get_intervals(self):
di = training_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = training_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]
self.assertTrue(isinstance(di, np.ndarray))
self.assertFalse(np.isnan(di).all())
    def test_get_response_times(self):
        """Response times extract to an ndarray for all session types."""
        rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
    def test_get_goCueTrigger_times(self):
        """Go-cue trigger times are ndarrays (raw states for lt5, extractor for ge5)."""
        data = raw.load_data(self.training_lt5['path'])
        # NOTE(review): for the lt5 branches the times are built locally from
        # the 'closed_loop' state and only type-checked, so the isinstance
        # assert is trivially true — the lt5 extractor itself is not exercised.
        gct = np.array([tr['behavior_data']['States timestamps'][
            'closed_loop'][0][0] for tr in data])
        self.assertTrue(isinstance(gct, np.ndarray))
        gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']
            ).extract()[0]
        self.assertTrue(isinstance(gct, np.ndarray))
        data = raw.load_data(self.biased_lt5['path'])
        gct = np.array([tr['behavior_data']['States timestamps'][
            'closed_loop'][0][0] for tr in data])
        self.assertTrue(isinstance(gct, np.ndarray))
        gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(gct, np.ndarray))
    def test_get_goCueOnset_times(self):
        """Go-cue onset times are ndarrays; the lt5 training fixture has none."""
        gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[
            0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        # no audio sync in the lt5 training fixture -> all NaN
        self.assertTrue(np.all(np.isnan(gcot)))
        # NOTE(review): `size != 0 or size == N` is true for any non-empty
        # array; presumably meant to pin the fixture size — confirm intent.
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[
            0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 12)
        gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 8)
    def test_get_included_trials_lt5(self):
        """Included-trials mask extracts to an ndarray for lt5 sessions."""
        it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    def test_get_included_trials_ge5(self):
        """Included-trials mask extracts to an ndarray for ge5 sessions."""
        it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    def test_get_included_trials(self):
        """Included-trials mask extracts when the rig version is given explicitly."""
        # lt5 sessions need an explicit version tag in the settings
        it = training_trials.IncludedTrials(self.training_lt5['path']).extract(
            settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = training_trials.IncludedTrials(self.training_ge5['path']).extract(
            )[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(
            settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
    @wheelMoves_fixture
    def test_extract_all(self):
        """extract_all returns the expected output count and saves every file."""
        # the fixture supplies an empty wheelMoves file, so lt5 training
        # extraction must fail with an informative error
        with self.assertRaises(ValueError) as ex:
            training_trials.extract_all(self.training_lt5['path'], settings
                ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
        self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',
            str(ex.exception))
        out, files = training_trials.extract_all(self.training_ge5['path'],
            save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))
        # stub out the wheel extractor so biased lt5/ge5 extraction can run
        with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'
            ) as Wheel:
            Wheel.var_names = tuple()
            Wheel().extract.return_value = {}, []
            out, files = biased_trials.extract_all(self.biased_lt5['path'],
                settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
            self.assertEqual(15, len(out))
            self.assertTrue(all(map(Path.exists, files)))
            out, files = biased_trials.extract_all(self.biased_ge5['path'],
                save=True)
            self.assertEqual(19, len(out))
            self.assertTrue(all(map(Path.exists, files)))
    def test_encoder_positions_clock_reset(self):
        """Loader makes rotary-encoder timestamps monotonic across a 2**32 wrap."""
        path = self.training_lt5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        # raw (pre-correction) timestamps of the samples after the wrap
        dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,
            1853979, 1859144])
        self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
        # the post-wrap samples were shifted up by exactly 2**32
        self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))
    def test_encoder_positions_clock_errors(self):
        """Corrupt encoder timestamps are repaired into a monotonic series."""
        path = self.biased_lt5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
        path = self.biased_ge5['path'] / 'raw_behavior_data'
        path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)
        dy = raw._load_encoder_positions_file_ge5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
    def test_wheel_folders(self):
        """Every wheel fixture file loads with strictly increasing timestamps."""
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
            ):
            df = raw._load_encoder_positions_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'
            ):
            df = raw._load_encoder_positions_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
def test_load_encoder_positions(self):
raw.load_encoder_positions(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.training_ge5['path'])
raw.load_encoder_positions(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_positions(self.biased_ge5['path'])
def test_load_encoder_events(self):
raw.load_encoder_events(self.training_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.training_ge5['path'])
raw.load_encoder_events(self.biased_lt5['path'], settings={
'IBLRIG_VERSION_TAG': '4.9.9'})
raw.load_encoder_events(self.biased_ge5['path'])
    def test_size_outputs(self):
        """Extract full sessions and verify all trials attributes align.

        alfio.check_dimensions returns 0 when every array in the loaded
        'trials' object has a consistent first dimension.  For the <5.0
        fixtures there are no wheel moves on disk, so the wheel-move extractor
        is patched to return a minimal mock result.
        """
        from ibllib.io.extractors.bpod_trials import extract_all
        # -- version >= 5.0.0: extract directly from the fixture sessions
        extract_all(self.training_ge5['path'])
        trials = alfio.load_object(self.training_ge5['path'] / 'alf',
            object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        extract_all(self.biased_ge5['path'])
        trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=
            'trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        # -- version < 5.0.0: stub the wheel-move extraction
        mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.
            array([1, 1]), 'peakVelocity_times': np.array([1, 1])}
        function_name = (
            'ibllib.io.extractors.training_wheel.extract_wheel_moves')
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.training_lt5['path'])
        trials = alfio.load_object(self.training_lt5['path'] / 'alf',
            object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.biased_lt5['path'])
        trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=
            'trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
def tearDown(self):
for f in self.main_path.rglob('_ibl_log.*.log'):
f.unlink()
[x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.
is_file()]
[x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.
is_file()]
[x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.
is_file()]
[x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.
is_file()]
[x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.
is_dir()]
[x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()
]
[x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.
is_dir()]
[x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()
]
class TestSyncWheelBpod(unittest.TestCase):
    """Tests for robust synchronisation of Bpod and Bonsai trial times."""

    def test_sync_bpod_bonsai_poor_quality_timestamps(self):
        """sync_trials_robust must recover the drift mapping despite missing
        or extra samples at either end of either input."""
        sync_trials_robust = raw.sync_trials_robust
        # Linear drift model applied to the second clock
        drift_pol = np.array([11 * 1e-06, -20])
        np.random.seed(seed=784)  # deterministic fixture
        t0_full = np.cumsum(np.random.rand(50)) + 0.001
        t1_full = np.polyval(drift_pol, t0_full) + t0_full
        t0 = t0_full.copy()
        t1 = t1_full.copy()
        # Clean case, then each input truncated at the start or the end,
        # then samples deleted from the middle of one or both inputs;
        # the recovered pairs must always satisfy the drift relation.
        t0_, t1_ = sync_trials_robust(t0, t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, t1[:-1])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, t1[1:])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0[1:], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0[:-1], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
    """Loaders must salvage usable data from corrupt wheel fixture files."""

    def setUp(self) ->None:
        self.main_path = Path(__file__).parent

    def test_encoder_events_corrupt(self):
        """Corrupt event files still yield more than 6 samples per version."""
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_lt5(file_events)
            self.assertTrue(dy.size > 6)
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_ge5(file_events)
            self.assertTrue(dy.size > 6)

    def test_encoder_positions_corrupts(self):
        """Corrupt position files still yield more than 18 samples per version."""
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_ge5(file_position)
            self.assertTrue(dy.size > 18)
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_lt5(file_position)
            self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
    """Minimal BaseExtractor stub used to exercise the saving machinery.

    NOTE(review): the class name looks like a typo for ``MockExtractor`` but
    it is referenced by name elsewhere in this module, so it is kept as-is.
    One output per supported serialization format (csv/tsv/ssv/npy).
    """
    save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',
        'some_file.npy')
    var_names = 'csv', 'ssv', 'tsv', 'npy'
    def _extract(self, **kwargs) ->tuple:
        # Return one dummy object per save_names entry, in var_names order.
        csv = pd.DataFrame([1, 2, 3])
        ssv = pd.DataFrame([1, 2, 3])
        tsv = pd.DataFrame([1, 2, 3])
        npy = np.array([1, 2, 3])
        return csv, ssv, tsv, npy
class TestBaseExtractorSavingMethods(unittest.TestCase):
    """BaseExtractor.extract(save=True) must write every declared file."""

    def setUp(self) ->None:
        # Use a throwaway directory as the fake session path.
        self.tempdir = tempfile.TemporaryDirectory()
        self.session_path = self.tempdir.name
        self.mock_extractor = MockExtracor(self.session_path)

    def test_saving_method(self):
        """All paths returned by extract(save=True) must exist on disk."""
        data, paths = self.mock_extractor.extract(save=True)
        self.assertTrue(all([x.exists() for x in paths]))

    def tearDown(self):
        self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
    """Tests for camera GPIO/audio grooming and timestamp attribution."""

    def test_groom_pin_state(self):
        """groom_pin_state must align GPIO fronts to audio TTLs and drop
        broken/extra TTL fronts."""
        # Synthetic camera timestamps: 60 fps over 10 s, offset and drifting
        fps = 60
        t_offset = 39.4
        ts = np.arange(0, 10, 1 / fps) + t_offset
        ts += np.full_like(ts, 0.0001).cumsum()  # cumulative drift
        # Two square TTL pulses encoded as GPIO frame indices + polarities
        n_pulses = 2
        pulse_width = 0.3
        duty = 0.5
        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
            'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
        gpio['polarities'][1::2] = -1  # falling edges
        aud_offset = 40.0
        audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[
            'polarities']}
        for p in range(n_pulses):
            i = p * 2
            rise = pulse_width * p + duty * p + 1
            audio['times'][i] = aud_offset + rise
            audio['times'][i + 1] = audio['times'][i] + pulse_width
            rise += t_offset
            gpio['indices'][i] = np.where(ts > rise)[0][0]
            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, "Audio dict shouldn't be effected")
        np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667,
            40.033333, 40.05])
        # Inject a spurious ultra-short TTL plus a trailing extra edge;
        # grooming with min_diff should discard them, leaving 4 fronts.
        delay = 0.08
        pulse_width = 1e-05
        t = audio['times'][0] + delay
        audio['times'] = np.sort(np.append(audio['times'], [t, t +
            pulse_width, 80]))
        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
        audio['polarities'][1::2] = -1
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff
            =0.005)
        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
        # Shift one front by a large amount; with a wide tolerance the GPIO
        # times must still be assigned to the audio times.
        audio['times'][4] -= 0.3
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,
            tolerance=0.1, min_diff=0.005)
        self.assertTrue(np.all(gpio_['times'] == audio_['times']))
        self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))

    def test_attribute_times(self, display=False):
        """attribute_times must match each tsb event to a tsa index under the
        various `take` strategies, tolerance, and injectivity settings."""
        # Two clocks at different rates plus one near-coincident extra event
        tsa = np.linspace(0, 60, 60 * 4)[:60]
        tsb = np.linspace(0, 60, 60 * 3)[:45]
        tsa = np.sort(np.append(tsa, 0.4))
        tsb = np.sort(np.append(tsb, 0.41))
        if display:  # optional visual debugging aid only
            from ibllib.plots import vertical_lines
            import matplotlib.pyplot as plt
            vertical_lines(tsb, linestyle=':', color='r', label='tsb')
            vertical_lines(tsa, linestyle=':', color='b', label='tsa')
            plt.legend()
        # Default behaviour: -1 marks unassigned events
        matches = camera.attribute_times(tsa, tsb)
        expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
        np.testing.assert_array_equal(matches, expected)
        self.assertEqual(matches.size, tsb.size)
        # take='nearest' shifts the assignment of the near-coincident events
        matches = camera.attribute_times(tsa, tsb, take='nearest')
        expected[np.r_[1:3]] = expected[1:3] + 1
        np.testing.assert_array_equal(matches, expected)
        # take='after' leaves events with no later tsa sample unassigned
        matches = camera.attribute_times(tsa, tsb, take='after')
        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23,
            25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
        expected[missing] = -1
        np.testing.assert_array_equal(matches, expected)
        # A tight tolerance drops the distant matches
        matches = camera.attribute_times(tsa, tsb, tol=0.05)
        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,
            49, 53, 57])
        np.testing.assert_array_equal(matches[matches > -1], expected)
        # Non-injective assignment may reuse a tsa index (index 2 twice)
        matches = camera.attribute_times(tsa, tsb, injective=False, take=
            'nearest')
        expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,
            18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,
            40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])
        np.testing.assert_array_equal(matches, expected)
        # Invalid `take` value must raise
        with self.assertRaises(ValueError):
            camera.attribute_times(tsa, tsb, injective=False, take='closest')
# --- NOTE(review): extraction artifact removed here; the text above is an
# auto-reformatted duplicate of this module, the canonical source follows ---
import functools
import shutil
import tempfile
import unittest
import unittest.mock
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.io.extractors import training_trials, biased_trials, camera
from ibllib.io import raw_data_loaders as raw
from ibllib.io.extractors.base import BaseExtractor
def wheelMoves_fixture(func):
    """Decorator to save some dummy wheelMoves ALF files for extraction tests.

    Before running the wrapped test method it writes zero-filled
    ``_ibl_wheelMoves.intervals.npy`` (n x 2) and
    ``_ibl_wheelMoves.peakAmplitude.npy`` (n) arrays into each session's
    ``alf`` folder, sized to the session's trial count; after the test it
    removes those folders again.

    :param func: a TestExtractTrialData test method taking only ``self``.
    """
    @functools.wraps(func)
    def wrapper(obj=None):
        # Save some wheelMoves ALF files
        attr_list = ['training_lt5',
                     'training_ge5',
                     'biased_lt5',
                     'biased_ge5']
        alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]
        n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]
        for p, n in zip(alf_paths, n_trials):
            p.mkdir()
            np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))
            np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))
        # Run method
        func(obj)
        # Teardown; delete the files
        for p in alf_paths:
            shutil.rmtree(p)
    return wrapper
class TestExtractTrialData(unittest.TestCase):
    """Per-variable Bpod trial extraction tests.

    Each test exercises one trials variable for four fixture sessions:
    training/biased protocols, each at iblrig version <5.0.0 ('lt5', which
    needs an explicit IBLRIG_VERSION_TAG) and >=5.0.0 ('ge5').  The fixture
    sessions live under ``data/`` next to this file.
    """

    def setUp(self):
        # Fixture session paths and their trial counts, keyed by protocol/version.
        self.main_path = Path(__file__).parent
        self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}
        self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}
        self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}
        self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}
        self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))
        self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))
        self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))
        self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))
        # turn off logging for unit testing as we will purposedly go into warning/error cases
        self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'
        self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'
        # Save some dummy wheel moves data for trial firstMovement_times extraction

    def test_get_feedbackType(self):
        """feedbackType must have one non-zero entry per trial."""
        # TRAINING SESSIONS
        ft = training_trials.FeedbackType(
            self.training_lt5['path']).extract()[0]
        self.assertEqual(ft.size, self.training_lt5['ntrials'])
        # check if no 0's in feedbackTypes
        self.assertFalse(ft[ft == 0].size > 0)
        # -- version >= 5.0.0
        ft = training_trials.FeedbackType(
            self.training_ge5['path']).extract()[0]
        self.assertEqual(ft.size, self.training_ge5['ntrials'])
        # check if no 0's in feedbackTypes
        self.assertFalse(ft[ft == 0].size > 0)
        # BIASED SESSIONS
        ft = biased_trials.FeedbackType(
            self.biased_lt5['path']).extract()[0]
        self.assertEqual(ft.size, self.biased_lt5['ntrials'])
        # check if no 0's in feedbackTypes
        self.assertFalse(ft[ft == 0].size > 0)
        # -- version >= 5.0.0
        ft = biased_trials.FeedbackType(
            self.biased_ge5['path']).extract()[0]
        self.assertEqual(ft.size, self.biased_ge5['ntrials'])
        # check if no 0's in feedbackTypes
        self.assertFalse(ft[ft == 0].size > 0)

    def test_get_contrastLR(self):
        """Exactly one of contrastLeft/contrastRight is set (non-NaN) per trial,
        and set values are non-negative."""
        # TRAINING SESSIONS
        cl, cr = training_trials.ContrastLR(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        # -- version >= 5.0.0
        cl, cr = training_trials.ContrastLR(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        # BIASED SESSIONS
        cl, cr = biased_trials.ContrastLR(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))
        # -- version >= 5.0.0
        cl, cr = biased_trials.ContrastLR(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))
        self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))
        self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))
        self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))

    def test_get_probabilityLeft(self):
        """probabilityLeft is an ndarray; for biased sessions every value must
        come from the session's BLOCK_PROBABILITY_SET (plus 0.5)."""
        # TRAINING SESSIONS
        pl = training_trials.ProbabilityLeft(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        # -- version >= 5.0.0
        pl = training_trials.ProbabilityLeft(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        # BIASED SESSIONS
        pl = biased_trials.ProbabilityLeft(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        # Test if only probs that are in prob set
        md = raw.load_settings(self.biased_lt5['path'])
        if md:
            probs = md['BLOCK_PROBABILITY_SET']
            probs.append(0.5)
            self.assertTrue(sum([x in probs for x in pl]) == len(pl))
        # -- version >= 5.0.0
        pl = biased_trials.ProbabilityLeft(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(pl, np.ndarray))
        # Test if only probs that are in prob set
        md = raw.load_settings(self.biased_ge5['path'])
        probs = md['BLOCK_PROBABILITY_SET']
        probs.append(0.5)
        self.assertTrue(sum([x in probs for x in pl]) == len(pl))

    def test_get_choice(self):
        """choice is an ndarray and is 0 on no-go trials."""
        # TRAINING SESSIONS
        choice = training_trials.Choice(
            session_path=self.training_lt5['path']).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_lt5['path'])
        trial_nogo = np.array(
            [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
             for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        # -- version >= 5.0.0
        choice = training_trials.Choice(
            session_path=self.training_ge5['path']).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.training_ge5['path'])
        trial_nogo = np.array(
            [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
             for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        # BIASED SESSIONS
        choice = biased_trials.Choice(
            session_path=self.biased_lt5['path']).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_lt5['path'])
        trial_nogo = np.array(
            [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
             for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)
        # -- version >= 5.0.0
        choice = biased_trials.Choice(
            session_path=self.biased_ge5['path']).extract(save=False)[0]
        self.assertTrue(isinstance(choice, np.ndarray))
        data = raw.load_data(self.biased_ge5['path'])
        trial_nogo = np.array(
            [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])
             for t in data])
        if any(trial_nogo):
            self.assertTrue(all(choice[trial_nogo]) == 0)

    def test_get_repNum(self):
        """repNum is an ndarray containing the expected repeat counts
        (training protocol only — biased sessions have no repeated trials)."""
        # TODO: Test its sawtooth
        # TRAINING SESSIONS
        rn = training_trials.RepNum(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rn, np.ndarray))
        for i in range(3):
            self.assertTrue(i in rn)
        # -- version >= 5.0.0
        rn = training_trials.RepNum(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rn, np.ndarray))
        for i in range(4):
            self.assertTrue(i in rn)
        # BIASED SESSIONS have no repeted trials

    def test_get_rewardVolume(self):
        """rewardVolume is an ndarray; for biased sessions all non-zero
        rewards share the same volume."""
        # TRAINING SESSIONS
        rv = training_trials.RewardVolume(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # -- version >= 5.0.0
        rv = training_trials.RewardVolume(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # BIASED SESSIONS
        rv = biased_trials.RewardVolume(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # Test if all non zero rewards are of the same value
        self.assertTrue(all([x == max(rv) for x in rv if x != 0]))
        # -- version >= 5.0.0
        rv = biased_trials.RewardVolume(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rv, np.ndarray))
        # Test if all non zero rewards are of the same value
        self.assertTrue(all([x == max(rv) for x in rv if x != 0]))

    def test_get_feedback_times_ge5(self):
        """feedback_times extraction returns an ndarray (>=5.0 sessions)."""
        # TRAINING SESSIONS
        ft = training_trials.FeedbackTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        # BIASED SESSIONS
        ft = biased_trials.FeedbackTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))

    def test_get_feedback_times_lt5(self):
        """feedback_times extraction returns an ndarray (<5.0 sessions)."""
        # TRAINING SESSIONS
        ft = training_trials.FeedbackTimes(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))
        # BIASED SESSIONS
        ft = biased_trials.FeedbackTimes(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(ft, np.ndarray))

    def test_get_stimOnTrigger_times(self):
        """stimOnTrigger_times extraction returns an ndarray for all fixtures."""
        # TRAINING SESSIONS
        sott = training_trials.StimOnTriggerTimes(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        # -- version >= 5.0.0
        sott = training_trials.StimOnTriggerTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        # BIASED SESSIONS
        sott = biased_trials.StimOnTriggerTimes(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))
        # -- version >= 5.0.0
        sott = biased_trials.StimOnTriggerTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(sott, np.ndarray))

    def test_get_stimOn_times_lt5(self):
        """Deprecated stimOn_times extractor still returns an ndarray (<5.0)."""
        # TRAINING SESSIONS
        st = training_trials.StimOnTimes_deprecated(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
        # BIASED SESSIONS
        st = biased_trials.StimOnTimes_deprecated(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))

    def test_get_stimOn_times_ge5(self):
        """Deprecated stimOn_times extractor still returns an ndarray (>=5.0)."""
        # TRAINING SESSIONS
        st = training_trials.StimOnTimes_deprecated(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))
        # BIASED SESSIONS
        st = biased_trials.StimOnTimes_deprecated(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(st, np.ndarray))

    def test_stimOnOffFreeze_times(self):
        """StimOnOffFreezeTimes returns a tuple whose first item is an ndarray."""
        # TRAINING SESSIONS
        st = training_trials.StimOnOffFreezeTimes(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        # BIASED SESSIONS
        st = biased_trials.StimOnOffFreezeTimes(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        # TRAINING SESSIONS
        st = training_trials.StimOnOffFreezeTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))
        # BIASED SESSIONS
        st = biased_trials.StimOnOffFreezeTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(st[0], np.ndarray))

    def test_get_intervals(self):
        """Trial intervals are an ndarray and not entirely NaN.

        NOTE(review): the two 'BIASED' cases below pass training session
        paths (self.training_*) to biased_trials.Intervals — possibly
        intentional (extractor reuse), possibly a copy-paste slip; confirm.
        """
        # TRAINING SESSIONS
        di = training_trials.Intervals(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(di, np.ndarray))
        self.assertFalse(np.isnan(di).all())
        # -- version >= 5.0.0
        di = training_trials.Intervals(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(di, np.ndarray))
        self.assertFalse(np.isnan(di).all())
        # BIASED SESSIONS
        di = biased_trials.Intervals(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(di, np.ndarray))
        self.assertFalse(np.isnan(di).all())
        # -- version >= 5.0.0
        di = biased_trials.Intervals(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(di, np.ndarray))
        self.assertFalse(np.isnan(di).all())

    def test_get_response_times(self):
        """response_times extraction returns an ndarray for all fixtures."""
        # TRAINING SESSIONS
        rt = training_trials.ResponseTimes(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        # -- version >= 5.0.0
        rt = training_trials.ResponseTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        # BIASED SESSIONS
        rt = biased_trials.ResponseTimes(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))
        # -- version >= 5.0.0
        rt = biased_trials.ResponseTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(rt, np.ndarray))

    def test_get_goCueTrigger_times(self):
        """goCueTrigger_times: read directly from raw data for <5.0 sessions,
        via the extractor for >=5.0 sessions; always an ndarray."""
        # TRAINING SESSIONS
        data = raw.load_data(self.training_lt5['path'])
        gct = np.array([tr['behavior_data']['States timestamps']
                        ['closed_loop'][0][0] for tr in data])
        self.assertTrue(isinstance(gct, np.ndarray))
        # -- version >= 5.0.0
        gct = training_trials.GoCueTriggerTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gct, np.ndarray))
        # BIASED SESSIONS
        data = raw.load_data(self.biased_lt5['path'])
        gct = np.array([tr['behavior_data']['States timestamps']
                        ['closed_loop'][0][0] for tr in data])
        self.assertTrue(isinstance(gct, np.ndarray))
        # -- version >= 5.0.0
        gct = biased_trials.GoCueTriggerTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gct, np.ndarray))

    def test_get_goCueOnset_times(self):
        """goCue onset times: all-NaN for the training <5.0 fixture, fully
        populated for the others.

        NOTE(review): `gcot.size != 0 or gcot.size == 4` only ever asserts
        a non-empty array (the second clause is unreachable when the first
        is false and size is 0) — likely meant `size == 0 or size == N`;
        left as-is pending confirmation.
        """
        # TRAINING SESSIONS
        gcot = training_trials.GoCueTimes(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertTrue(np.all(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        # -- version >= 5.0.0
        gcot = training_trials.GoCueTimes(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 12)
        # BIASED SESSIONS
        gcot = biased_trials.GoCueTimes(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 4)
        # -- version >= 5.0.0
        gcot = biased_trials.GoCueTimes(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(gcot, np.ndarray))
        self.assertFalse(np.any(np.isnan(gcot)))
        self.assertTrue(gcot.size != 0 or gcot.size == 8)

    def test_get_included_trials_lt5(self):
        """included trials extraction returns an ndarray (<5.0 sessions)."""
        # TRAINING SESSIONS
        it = training_trials.IncludedTrials(
            self.training_lt5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # BIASED SESSIONS
        it = biased_trials.IncludedTrials(
            self.biased_lt5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))

    def test_get_included_trials_ge5(self):
        """included trials extraction returns an ndarray (>=5.0 sessions)."""
        # TRAINING SESSIONS
        it = training_trials.IncludedTrials(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # BIASED SESSIONS
        it = biased_trials.IncludedTrials(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))

    def test_get_included_trials(self):
        """included trials extraction with an explicit legacy version tag."""
        # TRAINING SESSIONS
        it = training_trials.IncludedTrials(
            self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # -- version >= 5.0.0
        it = training_trials.IncludedTrials(
            self.training_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # BIASED SESSIONS
        it = biased_trials.IncludedTrials(
            self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]
        self.assertTrue(isinstance(it, np.ndarray))
        # -- version >= 5.0.0
        it = biased_trials.IncludedTrials(
            self.biased_ge5['path']).extract()[0]
        self.assertTrue(isinstance(it, np.ndarray))

    @wheelMoves_fixture
    def test_extract_all(self):
        """Full extraction: 19 outputs for >=5.0 sessions, 15 for biased <5.0
        (with the wheel extractor stubbed); training <5.0 raises because the
        fixture wheelMoves intervals are empty."""
        # TRAINING SESSIONS
        # Expect an error raised because no wheel moves were present in test data
        with self.assertRaises(ValueError) as ex:
            training_trials.extract_all(
                self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
        self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))
        # -- version >= 5.0.0
        out, files = training_trials.extract_all(self.training_ge5['path'], save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))
        # BIASED SESSIONS
        # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0
        # test data so we will stub the wheel extractor
        with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:
            Wheel.var_names = tuple()
            Wheel().extract.return_value = ({}, [])
            out, files = biased_trials.extract_all(
                self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)
            self.assertEqual(15, len(out))
            self.assertTrue(all(map(Path.exists, files)))
        # -- version >= 5.0.0
        out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)
        self.assertEqual(19, len(out))
        self.assertTrue(all(map(Path.exists, files)))

    def test_encoder_positions_clock_reset(self):
        """<5.0 position loader must rectify a 2**32 encoder clock rollover."""
        # TRAINING SESSIONS
        # only for training?
        path = self.training_lt5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])
        self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))
        self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))

    def test_encoder_positions_clock_errors(self):
        """Loaders must return monotonic timestamps from corrupt files."""
        # here we test for 2 kinds of file corruption that happen
        # 1/2 the first sample time is corrupt and absurdly high and should be discarded
        # 2/2 2 samples are swapped and need to be swapped backk
        path = self.biased_lt5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_lt5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))
        # -- version >= 5.0.0
        path = self.biased_ge5['path'] / "raw_behavior_data"
        path = next(path.glob("_iblrig_encoderPositions.raw*.ssv"), None)
        dy = raw._load_encoder_positions_file_ge5(path)
        self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))

    def test_wheel_folders(self):
        """Every wheel fixture file loads with monotonic timestamps."""
        # the wheel folder contains other errors in bpod output that had to be addressed
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
            df = raw._load_encoder_positions_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_lt5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):
            df = raw._load_encoder_positions_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))
        for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):
            df = raw._load_encoder_events_file_ge5(wf)
            self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))

    def test_load_encoder_positions(self):
        """Smoke-test encoder position loading for every session fixture."""
        raw.load_encoder_positions(self.training_lt5['path'],
                                   settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.training_ge5['path'])
        raw.load_encoder_positions(self.biased_lt5['path'],
                                   settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_positions(self.biased_ge5['path'])

    def test_load_encoder_events(self):
        """Smoke-test encoder event loading for every session fixture."""
        raw.load_encoder_events(self.training_lt5['path'],
                                settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_events(self.training_ge5['path'])
        raw.load_encoder_events(self.biased_lt5['path'],
                                settings={'IBLRIG_VERSION_TAG': '4.9.9'})
        raw.load_encoder_events(self.biased_ge5['path'])

    def test_size_outputs(self):
        """Full extraction yields a 'trials' ALF object with consistent dimensions."""
        # check the output dimensions
        # VERSION >= 5.0.0
        from ibllib.io.extractors.bpod_trials import extract_all
        extract_all(self.training_ge5['path'])
        trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        extract_all(self.biased_ge5['path'])
        trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        # VERSION < 5.0.0
        # for these test data there are no wheel moves so let's mock the output
        mock_data = {
            'intervals': np.array([[0, 1], ]),
            'peakAmplitude': np.array([1, 1]),
            'peakVelocity_times': np.array([1, 1])}
        function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'
        # Training
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.training_lt5['path'])
        trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)
        # Biased
        with unittest.mock.patch(function_name, return_value=mock_data):
            extract_all(self.biased_lt5['path'])
        trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')
        self.assertTrue(alfio.check_dimensions(trials) == 0)

    def tearDown(self):
        """Remove logs, extracted ALF files and emptied alf/ directories."""
        for f in self.main_path.rglob('_ibl_log.*.log'):
            f.unlink()
        [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]
        [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]
        [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]
        [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]
class TestSyncWheelBpod(unittest.TestCase):
    """Tests for robust synchronisation of Bpod and Bonsai trial times."""

    def test_sync_bpod_bonsai_poor_quality_timestamps(self):
        """sync_trials_robust must recover the drift mapping despite missing
        or extra samples at either end of either input."""
        sync_trials_robust = raw.sync_trials_robust
        drift_pol = np.array([11 * 1e-6, -20])  # bpod starts 20 secs before with 10 ppm drift
        np.random.seed(seed=784)  # deterministic fixture
        t0_full = np.cumsum(np.random.rand(50)) + .001
        t1_full = np.polyval(drift_pol, t0_full) + t0_full
        t0 = t0_full.copy()
        t1 = t1_full.copy()
        # Clean case, truncations at either end, then mid-sequence deletions:
        # the recovered pairs must always satisfy the drift relation.
        t0_, t1_ = sync_trials_robust(t0, t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, t1[:-1])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, t1[1:])
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0[1:], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0[:-1], t1)
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
        t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))
        assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)
class TestWheelLoaders(unittest.TestCase):
    """Loaders must salvage usable data from corrupt wheel fixture files."""

    def setUp(self) -> None:
        self.main_path = Path(__file__).parent

    def test_encoder_events_corrupt(self):
        """Corrupt event files still yield more than 6 samples per version."""
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_lt5(file_events)
            self.assertTrue(dy.size > 6)
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):
            dy = raw._load_encoder_events_file_ge5(file_events)
            self.assertTrue(dy.size > 6)

    def test_encoder_positions_corrupts(self):
        """Corrupt position files still yield more than 18 samples per version."""
        path = self.main_path.joinpath('data', 'wheel', 'ge5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_ge5(file_position)
            self.assertTrue(dy.size > 18)
        path = self.main_path.joinpath('data', 'wheel', 'lt5')
        for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):
            dy = raw._load_encoder_positions_file_lt5(file_position)
            self.assertTrue(dy.size > 18)
class MockExtracor(BaseExtractor):
    """Minimal BaseExtractor stub used to exercise the saving machinery.

    NOTE(review): the class name looks like a typo for ``MockExtractor`` but
    it is referenced by name elsewhere in this module, so it is kept as-is.
    One output per supported serialization format (csv/tsv/ssv/npy).
    """
    save_names = (
        "some_file.csv",
        "some_file.tsv",
        "some_file.ssv",
        "some_file.npy",
    )
    var_names = (
        "csv",
        "ssv",
        "tsv",
        "npy",
    )

    def _extract(self, **kwargs) -> tuple:
        # Return one dummy object per save_names entry, in var_names order.
        csv = pd.DataFrame([1, 2, 3])
        ssv = pd.DataFrame([1, 2, 3])
        tsv = pd.DataFrame([1, 2, 3])
        npy = np.array([1, 2, 3])
        return (csv, ssv, tsv, npy)
class TestBaseExtractorSavingMethods(unittest.TestCase):
    """Verify that BaseExtractor.extract(save=True) writes every declared output file."""

    def setUp(self) -> None:
        self.tempdir = tempfile.TemporaryDirectory()
        self.session_path = self.tempdir.name
        self.mock_extractor = MockExtracor(self.session_path)

    def test_saving_method(self):
        """Every path returned by extract(save=True) must exist on disk."""
        data, paths = self.mock_extractor.extract(save=True)
        self.assertTrue(all(p.exists() for p in paths))

    def tearDown(self):
        # Remove the temporary session directory and everything written to it.
        self.tempdir.cleanup()
class TestCameraExtractors(unittest.TestCase):
    """Unit tests for the camera extractor helpers: pin-state grooming and
    timestamp attribution."""

    def test_groom_pin_state(self):
        """Build synthetic camera timestamps, GPIO fronts and audio TTLs, then
        check `camera.groom_pin_state` under drift, sub-threshold ("broken")
        TTLs, an extra stray TTL, and one badly shifted front."""
        # UNIT DATA
        fps = 60
        t_offset = 39.4
        ts = np.arange(0, 10, 1 / fps) + t_offset
        # Add drift
        ts += np.full_like(ts, 1e-4).cumsum()
        n_pulses = 2
        pulse_width = 0.3
        duty = 0.5
        # GPIO fronts: alternating rise (+1) / fall (-1) polarities.
        gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),
                'polarities': np.ones(n_pulses * 2, dtype=np.int32)}
        gpio['polarities'][1::2] = -1
        aud_offset = 40.
        audio = {'times': np.empty(n_pulses * 2),
                 'polarities': gpio['polarities']}
        for p in range(n_pulses):
            i = p * 2
            rise = (pulse_width * p) + duty * p + 1
            audio['times'][i] = aud_offset + rise
            audio['times'][i + 1] = audio['times'][i] + pulse_width
            rise += t_offset
            # Map each TTL edge onto the index of the first camera frame after it.
            gpio['indices'][i] = np.where(ts > rise)[0][0]
            gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]
        gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)
        self.assertEqual(audio, audio_, 'Audio dict shouldn\'t be effected')
        # Returned timestamps appear shifted onto the audio clock (40 s onset).
        np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])
        # Broken TTLs + extra TTL
        delay = 0.08
        pulse_width = 1e-5
        t = audio['times'][0] + delay
        # Insert a 10 µs glitch pulse plus a stray front at t = 80 s.
        audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))
        audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)
        audio['polarities'][1::2] = -1
        # With min_diff=5 ms the glitch and stray fronts should be dropped,
        # leaving the original 4 matched fronts on each side.
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)
        self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)
        # One front shifted by a large amount
        audio['times'][4] -= 0.3
        gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)
        self.assertTrue(np.all(gpio_['times'] == audio_['times']))
        self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))

    def test_attribute_times(self, display=False):
        """`camera.attribute_times` assigns each event in `tsb` an index into
        `tsa` (or -1 when unmatched), under 'first'/'nearest'/'after'
        strategies, a tolerance, and the injective flag.

        :param display: if True, plot both timestamp trains for visual
            inspection (requires matplotlib and ibllib.plots).
        """
        # Create two timestamp arrays at two different frequencies
        tsa = np.linspace(0, 60, 60 * 4)[:60]  # 240bpm
        tsb = np.linspace(0, 60, 60 * 3)[:45]  # 180bpm
        tsa = np.sort(np.append(tsa, .4))  # Add ambiguous front
        tsb = np.sort(np.append(tsb, .41))
        if display:
            from ibllib.plots import vertical_lines
            import matplotlib.pyplot as plt
            vertical_lines(tsb, linestyle=':', color='r', label='tsb')
            vertical_lines(tsa, linestyle=':', color='b', label='tsa')
            plt.legend()
        # Check with default args
        matches = camera.attribute_times(tsa, tsb)
        expected = np.array(
            [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,
             22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,
             45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
        )
        np.testing.assert_array_equal(matches, expected)
        # One match (possibly -1) per tsb event.
        self.assertEqual(matches.size, tsb.size)
        # Taking closest instead of first should change index of ambiguous front
        matches = camera.attribute_times(tsa, tsb, take='nearest')
        expected[np.r_[1:3]] = expected[1:3] + 1
        np.testing.assert_array_equal(matches, expected)
        # Taking first after should exclude many pulses
        matches = camera.attribute_times(tsa, tsb, take='after')
        missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,
                   22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]
        expected[missing] = -1
        np.testing.assert_array_equal(matches, expected)
        # Lower tolerance
        matches = camera.attribute_times(tsa, tsb, tol=0.05)
        expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])
        np.testing.assert_array_equal(matches[matches > -1], expected)
        # Remove injective assert
        matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')
        expected = np.array(
            [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,
             24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,
             46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]
        )
        np.testing.assert_array_equal(matches, expected)
        # Check input validation
        with self.assertRaises(ValueError):
            camera.attribute_times(tsa, tsb, injective=False, take='closest')
if __name__ == "__main__":
    # Run this module's tests directly; exit=False keeps the interpreter
    # alive (useful when run interactively), verbosity=2 prints each test name.
    unittest.main(exit=False, verbosity=2)
|
flexible
|
{
"blob_id": "f17d33f1d035da42dc9a2b4c0c60beefc6a48dea",
"index": 64,
"step-1": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n <mask token>\n <mask token>\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = 
biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, 
np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), 
None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n <mask token>\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = 
Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio 
= {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 
24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n <mask token>\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n 
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def 
test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n 
self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n 
self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n <mask token>\n <mask token>\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def 
test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n 
raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = 
raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, 
\"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, 
tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n 
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n <mask token>\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = 
biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n 
self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n <mask token>\n <mask token>\n <mask token>\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n 
self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n <mask token>\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, 
np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = 
self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, 
t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass 
TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n 
self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' /\n 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' /\n 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' /\n 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' /\n 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5[\n 'path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path'])\n )\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5[\n 'path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path'])\n )\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n\n def test_get_feedbackType(self):\n ft = training_trials.FeedbackType(self.training_lt5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = training_trials.FeedbackType(self.training_ge5['path']).extract()[\n 0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n ft = biased_trials.FeedbackType(self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n cl, cr = training_trials.ContrastLR(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n 
self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = training_trials.ContrastLR(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n cl, cr = biased_trials.ContrastLR(self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([(np.sign(x) >= 0) for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([(np.sign(x) >= 0) for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n pl = training_trials.ProbabilityLeft(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = training_trials.ProbabilityLeft(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n pl = biased_trials.ProbabilityLeft(self.biased_lt5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x in probs) for x in pl]) == len(pl))\n pl = biased_trials.ProbabilityLeft(self.biased_ge5['path']).extract()[0\n ]\n self.assertTrue(isinstance(pl, np.ndarray))\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([(x 
in probs) for x in pl]) == len(pl))\n\n def test_get_choice(self):\n choice = training_trials.Choice(session_path=self.training_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = training_trials.Choice(session_path=self.training_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_lt5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n choice = biased_trials.Choice(session_path=self.biased_ge5['path']\n ).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array([(~np.isnan(t['behavior_data'][\n 'States timestamps']['no_go'][0][0])) for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n rn = training_trials.RepNum(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n rn = training_trials.RepNum(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n def test_get_rewardVolume(self):\n rv = 
training_trials.RewardVolume(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = training_trials.RewardVolume(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(rv, np.ndarray))\n rv = biased_trials.RewardVolume(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n rv = biased_trials.RewardVolume(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n self.assertTrue(all([(x == max(rv)) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n ft = training_trials.FeedbackTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n ft = training_trials.FeedbackTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n ft = biased_trials.FeedbackTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n sott = training_trials.StimOnTriggerTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = training_trials.StimOnTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n sott = biased_trials.StimOnTriggerTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = 
biased_trials.StimOnTimes_deprecated(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n st = training_trials.StimOnTimes_deprecated(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n st = biased_trials.StimOnTimes_deprecated(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n st = training_trials.StimOnOffFreezeTimes(self.training_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_lt5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = training_trials.StimOnOffFreezeTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n st = biased_trials.StimOnOffFreezeTimes(self.biased_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n di = training_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = training_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n di = biased_trials.Intervals(self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n rt = training_trials.ResponseTimes(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = training_trials.ResponseTimes(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = 
biased_trials.ResponseTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n rt = biased_trials.ResponseTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = training_trials.GoCueTriggerTimes(self.training_ge5['path']\n ).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps'][\n 'closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n gct = biased_trials.GoCueTriggerTimes(self.biased_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n gcot = training_trials.GoCueTimes(self.training_lt5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = training_trials.GoCueTimes(self.training_ge5['path']).extract()[\n 0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n gcot = biased_trials.GoCueTimes(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n gcot = biased_trials.GoCueTimes(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, 
np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n it = training_trials.IncludedTrials(self.training_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = training_trials.IncludedTrials(self.training_ge5['path']).extract(\n )[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_lt5['path']).extract(\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n it = biased_trials.IncludedTrials(self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(self.training_lt5['path'], settings\n ={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty',\n str(ex.exception))\n out, files = training_trials.extract_all(self.training_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel'\n ) as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = {}, []\n out, files = biased_trials.extract_all(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n out, files = biased_trials.extract_all(self.biased_ge5['path'],\n save=True)\n self.assertEqual(19, len(out))\n 
self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n path = self.training_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206,\n 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n path = self.biased_lt5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n path = self.biased_ge5['path'] / 'raw_behavior_data'\n path = next(path.glob('_iblrig_encoderPositions.raw*.ssv'), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'\n ):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n 
raw.load_encoder_positions(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n raw.load_encoder_events(self.biased_lt5['path'], settings={\n 'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n mock_data = {'intervals': np.array([[0, 1]]), 'peakAmplitude': np.\n array([1, 1]), 'peakVelocity_times': np.array([1, 1])}\n function_name = (\n 'ibllib.io.extractors.training_wheel.extract_wheel_moves')\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf',\n object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object=\n 'trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.unlink() for x in 
self.biased_ge5['path'].rglob('alf/*') if x.\n is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if x.is_dir()\n ]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.\n is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()\n ]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-06, -20])\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + 0.001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) ->None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n 
dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = ('some_file.csv', 'some_file.tsv', 'some_file.ssv',\n 'some_file.npy')\n var_names = 'csv', 'ssv', 'tsv', 'npy'\n\n def _extract(self, **kwargs) ->tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n return csv, ssv, tsv, npy\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n\n def setUp(self) ->None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n\n def test_groom_pin_state(self):\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n ts += np.full_like(ts, 0.0001).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.0\n audio = {'times': np.empty(n_pulses * 2), 'polarities': gpio[\n 'polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = pulse_width * p + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = 
audio['times'][i] + pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, \"Audio dict shouldn't be effected\")\n np.testing.assert_array_almost_equal(ts_[:4], [40.0, 40.016667, \n 40.033333, 40.05])\n delay = 0.08\n pulse_width = 1e-05\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t +\n pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff\n =0.005)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts,\n tolerance=0.1, min_diff=0.005)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41.0, 41.3])))\n\n def test_attribute_times(self, display=False):\n tsa = np.linspace(0, 60, 60 * 4)[:60]\n tsb = np.linspace(0, 60, 60 * 3)[:45]\n tsa = np.sort(np.append(tsa, 0.4))\n tsb = np.sort(np.append(tsb, 0.41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array([0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n matches = 
camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, \n 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45,\n 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n matches = camera.attribute_times(tsa, tsb, injective=False, take=\n 'nearest')\n expected = np.array([0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17,\n 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38,\n 40, 41, 42, 44, 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60])\n np.testing.assert_array_equal(matches, expected)\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\n<mask token>\n",
"step-5": "import functools\nimport shutil\nimport tempfile\nimport unittest\nimport unittest.mock\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\n\nimport one.alf.io as alfio\nfrom ibllib.io.extractors import training_trials, biased_trials, camera\nfrom ibllib.io import raw_data_loaders as raw\nfrom ibllib.io.extractors.base import BaseExtractor\n\n\ndef wheelMoves_fixture(func):\n \"\"\"Decorator to save some dummy wheelMoves ALF files for extraction tests\"\"\"\n @functools.wraps(func)\n def wrapper(obj=None):\n # Save some wheelMoves ALF files\n attr_list = ['training_lt5',\n 'training_ge5',\n 'biased_lt5',\n 'biased_ge5']\n alf_paths = [getattr(obj, p)['path'] / 'alf' for p in attr_list]\n n_trials = [getattr(obj, p)['ntrials'] for p in attr_list]\n for p, n in zip(alf_paths, n_trials):\n p.mkdir()\n np.save(str(p / '_ibl_wheelMoves.intervals.npy'), np.zeros((n, 2)))\n np.save(str(p / '_ibl_wheelMoves.peakAmplitude.npy'), np.zeros(n))\n\n # Run method\n func(obj)\n\n # Teardown; delete the files\n for p in alf_paths:\n shutil.rmtree(p)\n return wrapper\n\n\nclass TestExtractTrialData(unittest.TestCase):\n\n def setUp(self):\n self.main_path = Path(__file__).parent\n self.training_lt5 = {'path': self.main_path / 'data' / 'session_training_lt5'}\n self.biased_lt5 = {'path': self.main_path / 'data' / 'session_biased_lt5'}\n self.training_ge5 = {'path': self.main_path / 'data' / 'session_training_ge5'}\n self.biased_ge5 = {'path': self.main_path / 'data' / 'session_biased_ge5'}\n self.training_lt5['ntrials'] = len(raw.load_data(self.training_lt5['path']))\n self.biased_lt5['ntrials'] = len(raw.load_data(self.biased_lt5['path']))\n self.training_ge5['ntrials'] = len(raw.load_data(self.training_ge5['path']))\n self.biased_ge5['ntrials'] = len(raw.load_data(self.biased_ge5['path']))\n # turn off logging for unit testing as we will purposedly go into warning/error cases\n self.wheel_ge5_path = self.main_path / 'data' / 'wheel_ge5'\n 
self.wheel_lt5_path = self.main_path / 'data' / 'wheel_lt5'\n # Save some dummy wheel moves data for trial firstMovement_times extraction\n\n def test_get_feedbackType(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackType(\n self.training_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = training_trials.FeedbackType(\n self.training_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.training_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackType(\n self.biased_lt5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_lt5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n # -- version >= 5.0.0\n ft = biased_trials.FeedbackType(\n self.biased_ge5['path']).extract()[0]\n self.assertEqual(ft.size, self.biased_ge5['ntrials'])\n # check if no 0's in feedbackTypes\n self.assertFalse(ft[ft == 0].size > 0)\n\n def test_get_contrastLR(self):\n # TRAINING SESSIONS\n cl, cr = training_trials.ContrastLR(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = training_trials.ContrastLR(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n # BIASED SESSIONS\n cl, cr = biased_trials.ContrastLR(\n 
self.biased_lt5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n # -- version >= 5.0.0\n cl, cr = biased_trials.ContrastLR(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(all([np.sign(x) >= 0 for x in cl if ~np.isnan(x)]))\n self.assertTrue(all([np.sign(x) >= 0 for x in cr if ~np.isnan(x)]))\n self.assertTrue(sum(np.isnan(cl)) + sum(np.isnan(cr)) == len(cl))\n self.assertTrue(sum(~np.isnan(cl)) + sum(~np.isnan(cr)) == len(cl))\n\n def test_get_probabilityLeft(self):\n # TRAINING SESSIONS\n pl = training_trials.ProbabilityLeft(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # -- version >= 5.0.0\n pl = training_trials.ProbabilityLeft(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n\n # BIASED SESSIONS\n pl = biased_trials.ProbabilityLeft(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_lt5['path'])\n if md:\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n # -- version >= 5.0.0\n pl = biased_trials.ProbabilityLeft(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(pl, np.ndarray))\n # Test if only probs that are in prob set\n md = raw.load_settings(self.biased_ge5['path'])\n probs = md['BLOCK_PROBABILITY_SET']\n probs.append(0.5)\n self.assertTrue(sum([x in probs for x in pl]) == len(pl))\n\n def test_get_choice(self):\n # TRAINING SESSIONS\n choice = training_trials.Choice(\n session_path=self.training_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = 
raw.load_data(self.training_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = training_trials.Choice(\n session_path=self.training_ge5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.training_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n # BIASED SESSIONS\n choice = biased_trials.Choice(\n session_path=self.biased_lt5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_lt5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n # -- version >= 5.0.0\n choice = biased_trials.Choice(\n session_path=self.biased_ge5['path']).extract(save=False)[0]\n self.assertTrue(isinstance(choice, np.ndarray))\n data = raw.load_data(self.biased_ge5['path'])\n trial_nogo = np.array(\n [~np.isnan(t['behavior_data']['States timestamps']['no_go'][0][0])\n for t in data])\n if any(trial_nogo):\n self.assertTrue(all(choice[trial_nogo]) == 0)\n\n def test_get_repNum(self):\n # TODO: Test its sawtooth\n # TRAINING SESSIONS\n rn = training_trials.RepNum(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(3):\n self.assertTrue(i in rn)\n # -- version >= 5.0.0\n rn = training_trials.RepNum(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rn, np.ndarray))\n for i in range(4):\n self.assertTrue(i in rn)\n\n # BIASED SESSIONS have no repeted trials\n\n def test_get_rewardVolume(self):\n # TRAINING SESSIONS\n rv = 
training_trials.RewardVolume(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # -- version >= 5.0.0\n rv = training_trials.RewardVolume(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n\n # BIASED SESSIONS\n rv = biased_trials.RewardVolume(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n # -- version >= 5.0.0\n rv = biased_trials.RewardVolume(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rv, np.ndarray))\n # Test if all non zero rewards are of the same value\n self.assertTrue(all([x == max(rv) for x in rv if x != 0]))\n\n def test_get_feedback_times_ge5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_feedback_times_lt5(self):\n # TRAINING SESSIONS\n ft = training_trials.FeedbackTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n # BIASED SESSIONS\n ft = biased_trials.FeedbackTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(ft, np.ndarray))\n\n def test_get_stimOnTrigger_times(self):\n # TRAINING SESSIONS\n sott = training_trials.StimOnTriggerTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott = training_trials.StimOnTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # BIASED SESSIONS\n sott = biased_trials.StimOnTriggerTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n # -- version >= 5.0.0\n sott 
= biased_trials.StimOnTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(sott, np.ndarray))\n\n def test_get_stimOn_times_lt5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnTimes_deprecated(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_get_stimOn_times_ge5(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnTimes_deprecated(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnTimes_deprecated(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st, np.ndarray))\n\n def test_stimOnOffFreeze_times(self):\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # TRAINING SESSIONS\n st = training_trials.StimOnOffFreezeTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n # BIASED SESSIONS\n st = biased_trials.StimOnOffFreezeTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(st[0], np.ndarray))\n\n def test_get_intervals(self):\n # TRAINING SESSIONS\n di = training_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = training_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n # BIASED SESSIONS\n di = biased_trials.Intervals(\n self.training_lt5['path']).extract()[0]\n 
self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n # -- version >= 5.0.0\n di = biased_trials.Intervals(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(di, np.ndarray))\n self.assertFalse(np.isnan(di).all())\n\n def test_get_response_times(self):\n # TRAINING SESSIONS\n rt = training_trials.ResponseTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = training_trials.ResponseTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n # BIASED SESSIONS\n rt = biased_trials.ResponseTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n # -- version >= 5.0.0\n rt = biased_trials.ResponseTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(rt, np.ndarray))\n\n def test_get_goCueTrigger_times(self):\n # TRAINING SESSIONS\n data = raw.load_data(self.training_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = training_trials.GoCueTriggerTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n # BIASED SESSIONS\n data = raw.load_data(self.biased_lt5['path'])\n gct = np.array([tr['behavior_data']['States timestamps']\n ['closed_loop'][0][0] for tr in data])\n self.assertTrue(isinstance(gct, np.ndarray))\n # -- version >= 5.0.0\n gct = biased_trials.GoCueTriggerTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gct, np.ndarray))\n\n def test_get_goCueOnset_times(self):\n # TRAINING SESSIONS\n gcot = training_trials.GoCueTimes(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertTrue(np.all(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = 
training_trials.GoCueTimes(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 12)\n\n # BIASED SESSIONS\n gcot = biased_trials.GoCueTimes(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 4)\n # -- version >= 5.0.0\n gcot = biased_trials.GoCueTimes(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(gcot, np.ndarray))\n self.assertFalse(np.any(np.isnan(gcot)))\n self.assertTrue(gcot.size != 0 or gcot.size == 8)\n\n def test_get_included_trials_lt5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials_ge5(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n def test_get_included_trials(self):\n # TRAINING SESSIONS\n it = training_trials.IncludedTrials(\n self.training_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = training_trials.IncludedTrials(\n self.training_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n # BIASED SESSIONS\n it = biased_trials.IncludedTrials(\n self.biased_lt5['path']).extract(settings={'IBLRIG_VERSION_TAG': '4.9.9'})[0]\n self.assertTrue(isinstance(it, np.ndarray))\n # -- version >= 5.0.0\n it = biased_trials.IncludedTrials(\n 
self.biased_ge5['path']).extract()[0]\n self.assertTrue(isinstance(it, np.ndarray))\n\n @wheelMoves_fixture\n def test_extract_all(self):\n # TRAINING SESSIONS\n # Expect an error raised because no wheel moves were present in test data\n with self.assertRaises(ValueError) as ex:\n training_trials.extract_all(\n self.training_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertIn('_ibl_wheelMoves.intervals.npy appears to be empty', str(ex.exception))\n # -- version >= 5.0.0\n out, files = training_trials.extract_all(self.training_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n # BIASED SESSIONS\n # The new trials extractor additionally extracts the wheel data and this fails for the < 5.0\n # test data so we will stub the wheel extractor\n with unittest.mock.patch('ibllib.io.extractors.biased_trials.Wheel') as Wheel:\n Wheel.var_names = tuple()\n Wheel().extract.return_value = ({}, [])\n out, files = biased_trials.extract_all(\n self.biased_lt5['path'], settings={'IBLRIG_VERSION_TAG': '4.9.9'}, save=True)\n self.assertEqual(15, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n # -- version >= 5.0.0\n out, files = biased_trials.extract_all(self.biased_ge5['path'], save=True)\n self.assertEqual(19, len(out))\n self.assertTrue(all(map(Path.exists, files)))\n\n def test_encoder_positions_clock_reset(self):\n # TRAINING SESSIONS\n # only for training?\n path = self.training_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n dat = np.array([849736, 1532230, 1822449, 1833514, 1841566, 1848206, 1853979, 1859144])\n self.assertTrue(np.all(np.diff(dy['re_ts']) > 0))\n self.assertTrue(all(dy['re_ts'][6:] - 2 ** 32 - dat == 0))\n\n def test_encoder_positions_clock_errors(self):\n # here we test for 2 kinds of file corruption that happen\n # 1/2 the first sample time is corrupt 
and absurdly high and should be discarded\n # 2/2 2 samples are swapped and need to be swapped backk\n path = self.biased_lt5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_lt5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n # -- version >= 5.0.0\n path = self.biased_ge5['path'] / \"raw_behavior_data\"\n path = next(path.glob(\"_iblrig_encoderPositions.raw*.ssv\"), None)\n dy = raw._load_encoder_positions_file_ge5(path)\n self.assertTrue(np.all(np.diff(np.array(dy.re_ts)) > 0))\n\n def test_wheel_folders(self):\n # the wheel folder contains other errors in bpod output that had to be addressed\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_lt5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_lt5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderPositions*.raw*.ssv'):\n df = raw._load_encoder_positions_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n for wf in self.wheel_ge5_path.glob('_iblrig_encoderEvents*.raw*.ssv'):\n df = raw._load_encoder_events_file_ge5(wf)\n self.assertTrue(np.all(np.diff(np.array(df.re_ts)) > 0))\n\n def test_load_encoder_positions(self):\n raw.load_encoder_positions(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.training_ge5['path'])\n raw.load_encoder_positions(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_positions(self.biased_ge5['path'])\n\n def test_load_encoder_events(self):\n raw.load_encoder_events(self.training_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.training_ge5['path'])\n 
raw.load_encoder_events(self.biased_lt5['path'],\n settings={'IBLRIG_VERSION_TAG': '4.9.9'})\n raw.load_encoder_events(self.biased_ge5['path'])\n\n def test_size_outputs(self):\n # check the output dimensions\n # VERSION >= 5.0.0\n from ibllib.io.extractors.bpod_trials import extract_all\n extract_all(self.training_ge5['path'])\n trials = alfio.load_object(self.training_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n extract_all(self.biased_ge5['path'])\n trials = alfio.load_object(self.biased_ge5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # VERSION < 5.0.0\n # for these test data there are no wheel moves so let's mock the output\n mock_data = {\n 'intervals': np.array([[0, 1], ]),\n 'peakAmplitude': np.array([1, 1]),\n 'peakVelocity_times': np.array([1, 1])}\n function_name = 'ibllib.io.extractors.training_wheel.extract_wheel_moves'\n # Training\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.training_lt5['path'])\n trials = alfio.load_object(self.training_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n # Biased\n with unittest.mock.patch(function_name, return_value=mock_data):\n extract_all(self.biased_lt5['path'])\n trials = alfio.load_object(self.biased_lt5['path'] / 'alf', object='trials')\n self.assertTrue(alfio.check_dimensions(trials) == 0)\n\n def tearDown(self):\n for f in self.main_path.rglob('_ibl_log.*.log'):\n f.unlink()\n [x.unlink() for x in self.training_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_lt5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.training_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.unlink() for x in self.biased_ge5['path'].rglob('alf/*') if x.is_file()]\n [x.rmdir() for x in self.training_lt5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_lt5['path'].rglob('alf/') if 
x.is_dir()]\n [x.rmdir() for x in self.training_ge5['path'].rglob('alf/') if x.is_dir()]\n [x.rmdir() for x in self.biased_ge5['path'].rglob('alf/') if x.is_dir()]\n\n\nclass TestSyncWheelBpod(unittest.TestCase):\n\n def test_sync_bpod_bonsai_poor_quality_timestamps(self):\n sync_trials_robust = raw.sync_trials_robust\n drift_pol = np.array([11 * 1e-6, -20]) # bpod starts 20 secs before with 10 ppm drift\n np.random.seed(seed=784)\n t0_full = np.cumsum(np.random.rand(50)) + .001\n t1_full = np.polyval(drift_pol, t0_full) + t0_full\n t0 = t0_full.copy()\n t1 = t1_full.copy()\n\n t0_, t1_ = sync_trials_robust(t0, t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[:-1])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, t1[1:])\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[1:], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0[:-1], t1)\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(t0, np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n t0_, t1_ = sync_trials_robust(np.delete(t0, 12), np.delete(t1, 24))\n assert np.allclose(t1_, np.polyval(drift_pol, t0_) + t0_)\n\n\nclass TestWheelLoaders(unittest.TestCase):\n\n def setUp(self) -> None:\n self.main_path = Path(__file__).parent\n\n def test_encoder_events_corrupt(self):\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_lt5(file_events)\n self.assertTrue(dy.size > 6)\n path = self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_events in path.rglob('_iblrig_encoderEvents.raw.*'):\n dy = raw._load_encoder_events_file_ge5(file_events)\n self.assertTrue(dy.size > 6)\n\n def test_encoder_positions_corrupts(self):\n path = 
self.main_path.joinpath('data', 'wheel', 'ge5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_ge5(file_position)\n self.assertTrue(dy.size > 18)\n path = self.main_path.joinpath('data', 'wheel', 'lt5')\n for file_position in path.rglob('_iblrig_encoderPositions.raw.*'):\n dy = raw._load_encoder_positions_file_lt5(file_position)\n self.assertTrue(dy.size > 18)\n\n\nclass MockExtracor(BaseExtractor):\n save_names = (\n \"some_file.csv\",\n \"some_file.tsv\",\n \"some_file.ssv\",\n \"some_file.npy\",\n )\n var_names = (\n \"csv\",\n \"ssv\",\n \"tsv\",\n \"npy\",\n )\n\n def _extract(self, **kwargs) -> tuple:\n csv = pd.DataFrame([1, 2, 3])\n ssv = pd.DataFrame([1, 2, 3])\n tsv = pd.DataFrame([1, 2, 3])\n npy = np.array([1, 2, 3])\n\n return (csv, ssv, tsv, npy)\n\n\nclass TestBaseExtractorSavingMethods(unittest.TestCase):\n def setUp(self) -> None:\n self.tempdir = tempfile.TemporaryDirectory()\n self.session_path = self.tempdir.name\n # self.addClassCleanup(tempdir.cleanup) # py3.8\n self.mock_extractor = MockExtracor(self.session_path)\n\n def test_saving_method(self):\n data, paths = self.mock_extractor.extract(save=True)\n self.assertTrue(all([x.exists() for x in paths]))\n\n def tearDown(self):\n self.tempdir.cleanup()\n\n\nclass TestCameraExtractors(unittest.TestCase):\n def test_groom_pin_state(self):\n # UNIT DATA\n fps = 60\n t_offset = 39.4\n ts = np.arange(0, 10, 1 / fps) + t_offset\n # Add drift\n ts += np.full_like(ts, 1e-4).cumsum()\n n_pulses = 2\n pulse_width = 0.3\n duty = 0.5\n gpio = {'indices': np.empty(n_pulses * 2, dtype=np.int32),\n 'polarities': np.ones(n_pulses * 2, dtype=np.int32)}\n gpio['polarities'][1::2] = -1\n aud_offset = 40.\n audio = {'times': np.empty(n_pulses * 2),\n 'polarities': gpio['polarities']}\n for p in range(n_pulses):\n i = p * 2\n rise = (pulse_width * p) + duty * p + 1\n audio['times'][i] = aud_offset + rise\n audio['times'][i + 1] = audio['times'][i] + 
pulse_width\n rise += t_offset\n gpio['indices'][i] = np.where(ts > rise)[0][0]\n gpio['indices'][i + 1] = np.where(ts > rise + pulse_width)[0][0]\n\n gpio_, audio_, ts_ = camera.groom_pin_state(gpio, audio, ts)\n self.assertEqual(audio, audio_, 'Audio dict shouldn\\'t be effected')\n np.testing.assert_array_almost_equal(ts_[:4], [40., 40.016667, 40.033333, 40.05])\n\n # Broken TTLs + extra TTL\n delay = 0.08\n pulse_width = 1e-5\n t = audio['times'][0] + delay\n audio['times'] = np.sort(np.append(audio['times'], [t, t + pulse_width, 80]))\n audio['polarities'] = np.ones(audio['times'].shape, dtype=np.int32)\n audio['polarities'][1::2] = -1\n\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, min_diff=5e-3)\n self.assertTrue(audio_['times'].size == gpio_['times'].size == 4)\n\n # One front shifted by a large amount\n audio['times'][4] -= 0.3\n gpio_, audio_, _ = camera.groom_pin_state(gpio, audio, ts, tolerance=.1, min_diff=5e-3)\n self.assertTrue(np.all(gpio_['times'] == audio_['times']))\n self.assertTrue(np.all(gpio_['times'] == np.array([41., 41.3])))\n\n def test_attribute_times(self, display=False):\n # Create two timestamp arrays at two different frequencies\n tsa = np.linspace(0, 60, 60 * 4)[:60] # 240bpm\n tsb = np.linspace(0, 60, 60 * 3)[:45] # 180bpm\n tsa = np.sort(np.append(tsa, .4)) # Add ambiguous front\n tsb = np.sort(np.append(tsb, .41))\n if display:\n from ibllib.plots import vertical_lines\n import matplotlib.pyplot as plt\n vertical_lines(tsb, linestyle=':', color='r', label='tsb')\n vertical_lines(tsa, linestyle=':', color='b', label='tsa')\n plt.legend()\n\n # Check with default args\n matches = camera.attribute_times(tsa, tsb)\n expected = np.array(\n [0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21,\n 22, 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44,\n 45, 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n self.assertEqual(matches.size, tsb.size)\n\n # 
Taking closest instead of first should change index of ambiguous front\n matches = camera.attribute_times(tsa, tsb, take='nearest')\n expected[np.r_[1:3]] = expected[1:3] + 1\n np.testing.assert_array_equal(matches, expected)\n\n # Taking first after should exclude many pulses\n matches = camera.attribute_times(tsa, tsb, take='after')\n missing = [0, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20,\n 22, 23, 25, 26, 28, 29, 31, 32, 34, 35, 37, 40, 43]\n expected[missing] = -1\n np.testing.assert_array_equal(matches, expected)\n\n # Lower tolerance\n matches = camera.attribute_times(tsa, tsb, tol=0.05)\n expected = np.array([0, 2, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57])\n np.testing.assert_array_equal(matches[matches > -1], expected)\n\n # Remove injective assert\n matches = camera.attribute_times(tsa, tsb, injective=False, take='nearest')\n expected = np.array(\n [0, 2, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22,\n 24, 25, 26, 28, 29, 30, 32, 33, 34, 36, 37, 38, 40, 41, 42, 44, 45,\n 46, 48, 49, -1, 52, 53, -1, 56, 57, -1, 60]\n )\n np.testing.assert_array_equal(matches, expected)\n\n # Check input validation\n with self.assertRaises(ValueError):\n camera.attribute_times(tsa, tsb, injective=False, take='closest')\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False, verbosity=2)\n",
"step-ids": [
27,
34,
37,
45,
49
]
}
|
[
27,
34,
37,
45,
49
] |
# question 1d
# points: 6
import sys
import numpy as np
from astropy.stats import kuiper
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import handin2 as nur
def main():
seed = 8912312
np.random.seed(8912312)
u = 0
sigma = 1
cdf = nur.gaussian_cdf
num_samples = np.logspace(1,5,num=50)
sample_size = int(1e5)
my_k = np.zeros(50)
my_p = np.zeros(50)
pyth_k = np.zeros(50)
pyth_p = np.zeros(50)
# random number params
x = np.zeros(sample_size)
y = np.zeros(sample_size)
xn = np.zeros(sample_size)
yn = np.zeros(sample_size)
# want to generate one sample of 1e5 numbers then take slices
for i in range(sample_size):
x[i],seed = nur.rng(seed)
xn[i],yn[i] = nur.normalize_random_distribution(x[i],y[i],u,sigma)
for i,s in enumerate(num_samples):
# slice of x at given s
x_s = xn[:int(s)]
x_k = x[:int(s)]
my_k[i],my_p[i] = nur.kuiper_test(x_s,cdf)
pyth_k[i],pyth_p[i] = kuiper(x_k)
# plotting procedure
plt.figure(1,figsize=(7,5))
plt.plot(num_samples,my_k,c='b',ls='None',marker='.',markersize=1,
label='my kuiper test')
plt.plot(num_samples,pyth_k,c='r',ls='None',marker='s',markersize=1,
label='astropy kuiper test')
plt.xscale('log')
plt.xlabel("number of points")
plt.ylabel("kuiper statistic")
plt.legend()
plt.savefig('./plots/kuiper_stat.png',format='png',dpi=300)
plt.figure(2,figsize=(7,5))
plt.plot(num_samples,my_p,c='b',label='my probabilities')
plt.plot(num_samples,pyth_p,c='r',label='astropy probabilities')
plt.xscale('log')
plt.xlabel('sample size')
plt.ylabel('probabilties')
plt.legend(frameon=False,loc='best')
plt.savefig('./plots/k_prob.png',format='png',dpi=300)
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "0158141832423b567f252e38640e384cdf340f8b",
"index": 7105,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n seed = 8912312\n np.random.seed(8912312)\n u = 0\n sigma = 1\n cdf = nur.gaussian_cdf\n num_samples = np.logspace(1, 5, num=50)\n sample_size = int(100000.0)\n my_k = np.zeros(50)\n my_p = np.zeros(50)\n pyth_k = np.zeros(50)\n pyth_p = np.zeros(50)\n x = np.zeros(sample_size)\n y = np.zeros(sample_size)\n xn = np.zeros(sample_size)\n yn = np.zeros(sample_size)\n for i in range(sample_size):\n x[i], seed = nur.rng(seed)\n xn[i], yn[i] = nur.normalize_random_distribution(x[i], y[i], u, sigma)\n for i, s in enumerate(num_samples):\n x_s = xn[:int(s)]\n x_k = x[:int(s)]\n my_k[i], my_p[i] = nur.kuiper_test(x_s, cdf)\n pyth_k[i], pyth_p[i] = kuiper(x_k)\n plt.figure(1, figsize=(7, 5))\n plt.plot(num_samples, my_k, c='b', ls='None', marker='.', markersize=1,\n label='my kuiper test')\n plt.plot(num_samples, pyth_k, c='r', ls='None', marker='s', markersize=\n 1, label='astropy kuiper test')\n plt.xscale('log')\n plt.xlabel('number of points')\n plt.ylabel('kuiper statistic')\n plt.legend()\n plt.savefig('./plots/kuiper_stat.png', format='png', dpi=300)\n plt.figure(2, figsize=(7, 5))\n plt.plot(num_samples, my_p, c='b', label='my probabilities')\n plt.plot(num_samples, pyth_p, c='r', label='astropy probabilities')\n plt.xscale('log')\n plt.xlabel('sample size')\n plt.ylabel('probabilties')\n plt.legend(frameon=False, loc='best')\n plt.savefig('./plots/k_prob.png', format='png', dpi=300)\n\n\n<mask token>\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\n\n\ndef main():\n seed = 8912312\n np.random.seed(8912312)\n u = 0\n sigma = 1\n cdf = nur.gaussian_cdf\n num_samples = np.logspace(1, 5, num=50)\n sample_size = int(100000.0)\n my_k = np.zeros(50)\n my_p = np.zeros(50)\n pyth_k = np.zeros(50)\n pyth_p = np.zeros(50)\n x = np.zeros(sample_size)\n y = np.zeros(sample_size)\n xn = np.zeros(sample_size)\n yn = np.zeros(sample_size)\n for i in range(sample_size):\n x[i], seed = nur.rng(seed)\n xn[i], yn[i] = nur.normalize_random_distribution(x[i], y[i], u, sigma)\n for i, s in enumerate(num_samples):\n x_s = xn[:int(s)]\n x_k = x[:int(s)]\n my_k[i], my_p[i] = nur.kuiper_test(x_s, cdf)\n pyth_k[i], pyth_p[i] = kuiper(x_k)\n plt.figure(1, figsize=(7, 5))\n plt.plot(num_samples, my_k, c='b', ls='None', marker='.', markersize=1,\n label='my kuiper test')\n plt.plot(num_samples, pyth_k, c='r', ls='None', marker='s', markersize=\n 1, label='astropy kuiper test')\n plt.xscale('log')\n plt.xlabel('number of points')\n plt.ylabel('kuiper statistic')\n plt.legend()\n plt.savefig('./plots/kuiper_stat.png', format='png', dpi=300)\n plt.figure(2, figsize=(7, 5))\n plt.plot(num_samples, my_p, c='b', label='my probabilities')\n plt.plot(num_samples, pyth_p, c='r', label='astropy probabilities')\n plt.xscale('log')\n plt.xlabel('sample size')\n plt.ylabel('probabilties')\n plt.legend(frameon=False, loc='best')\n plt.savefig('./plots/k_prob.png', format='png', dpi=300)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import sys\nimport numpy as np\nfrom astropy.stats import kuiper\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport handin2 as nur\n\n\ndef main():\n seed = 8912312\n np.random.seed(8912312)\n u = 0\n sigma = 1\n cdf = nur.gaussian_cdf\n num_samples = np.logspace(1, 5, num=50)\n sample_size = int(100000.0)\n my_k = np.zeros(50)\n my_p = np.zeros(50)\n pyth_k = np.zeros(50)\n pyth_p = np.zeros(50)\n x = np.zeros(sample_size)\n y = np.zeros(sample_size)\n xn = np.zeros(sample_size)\n yn = np.zeros(sample_size)\n for i in range(sample_size):\n x[i], seed = nur.rng(seed)\n xn[i], yn[i] = nur.normalize_random_distribution(x[i], y[i], u, sigma)\n for i, s in enumerate(num_samples):\n x_s = xn[:int(s)]\n x_k = x[:int(s)]\n my_k[i], my_p[i] = nur.kuiper_test(x_s, cdf)\n pyth_k[i], pyth_p[i] = kuiper(x_k)\n plt.figure(1, figsize=(7, 5))\n plt.plot(num_samples, my_k, c='b', ls='None', marker='.', markersize=1,\n label='my kuiper test')\n plt.plot(num_samples, pyth_k, c='r', ls='None', marker='s', markersize=\n 1, label='astropy kuiper test')\n plt.xscale('log')\n plt.xlabel('number of points')\n plt.ylabel('kuiper statistic')\n plt.legend()\n plt.savefig('./plots/kuiper_stat.png', format='png', dpi=300)\n plt.figure(2, figsize=(7, 5))\n plt.plot(num_samples, my_p, c='b', label='my probabilities')\n plt.plot(num_samples, pyth_p, c='r', label='astropy probabilities')\n plt.xscale('log')\n plt.xlabel('sample size')\n plt.ylabel('probabilties')\n plt.legend(frameon=False, loc='best')\n plt.savefig('./plots/k_prob.png', format='png', dpi=300)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "# question 1d\n# points: 6\n\nimport sys\n\nimport numpy as np\nfrom astropy.stats import kuiper\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nimport handin2 as nur\n\ndef main():\n seed = 8912312\n np.random.seed(8912312)\n u = 0\n sigma = 1\n cdf = nur.gaussian_cdf\n num_samples = np.logspace(1,5,num=50)\n sample_size = int(1e5)\n my_k = np.zeros(50)\n my_p = np.zeros(50)\n pyth_k = np.zeros(50)\n pyth_p = np.zeros(50)\n\n # random number params\n x = np.zeros(sample_size)\n y = np.zeros(sample_size)\n xn = np.zeros(sample_size)\n yn = np.zeros(sample_size)\n\n # want to generate one sample of 1e5 numbers then take slices\n for i in range(sample_size):\n x[i],seed = nur.rng(seed)\n xn[i],yn[i] = nur.normalize_random_distribution(x[i],y[i],u,sigma)\n\n for i,s in enumerate(num_samples):\n # slice of x at given s\n x_s = xn[:int(s)]\n x_k = x[:int(s)]\n my_k[i],my_p[i] = nur.kuiper_test(x_s,cdf)\n pyth_k[i],pyth_p[i] = kuiper(x_k)\n\n # plotting procedure\n plt.figure(1,figsize=(7,5))\n plt.plot(num_samples,my_k,c='b',ls='None',marker='.',markersize=1,\n label='my kuiper test')\n plt.plot(num_samples,pyth_k,c='r',ls='None',marker='s',markersize=1,\n label='astropy kuiper test')\n plt.xscale('log')\n plt.xlabel(\"number of points\")\n plt.ylabel(\"kuiper statistic\")\n plt.legend()\n plt.savefig('./plots/kuiper_stat.png',format='png',dpi=300)\n\n plt.figure(2,figsize=(7,5))\n plt.plot(num_samples,my_p,c='b',label='my probabilities')\n plt.plot(num_samples,pyth_p,c='r',label='astropy probabilities')\n plt.xscale('log')\n plt.xlabel('sample size')\n plt.ylabel('probabilties')\n plt.legend(frameon=False,loc='best')\n plt.savefig('./plots/k_prob.png',format='png',dpi=300)\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestNestedDict(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
<|reserved_special_token_0|>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<|reserved_special_token_0|>
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
<|reserved_special_token_0|>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
<|reserved_special_token_0|>
def test_create(self):
keys = ['a', 'b', 'c']
value = {u'd': 1}
d = self.nd.create(value=value, keys=keys)
dchg = {'a': {'b': {'c': {u'd': 1}}}}
self.assertEqual(d, dchg)
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
def test_merge_shallow(self):
d = {}
dchg = {}
du = self.nd.merge_shallow(dchg=dchg, dnow=d)
self.assertEqual(du, d)
d_original = {'hello1': 1}
dup = {'hello2': 2}
du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(du, {'hello1': 1, 'hello2': 2})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over'}})
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
def test_file(self):
self.assertTrue(os.path.isfile(self.afile))
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
def test_set(self):
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[u'0002', u'topping',
u'5001', u'type'], dnow=dcopy)
value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': 'topless'})
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001',
'price'], dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': u'None', 'price':
'5.01'})
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001',
'type'], dnow=dcopy)
pprint(dchg)
argv = [35, 'topping', '5001']
value = self.nd.get(keys=argv, dnow=dchg)
self.assertEqual(value, {'type': 'topless'})
dcopy = copy.deepcopy(self.dfood)
dnew = {'id': 555, 'type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],
dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
pprint(value)
self.assertEqual(value, dnew)
dcopy = copy.deepcopy(self.dfood)
dnew = {'Type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],
dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price':
0.99, u'type': u'None'})
def test_create(self):
keys = ['a', 'b', 'c']
value = {u'd': 1}
d = self.nd.create(value=value, keys=keys)
dchg = {'a': {'b': {'c': {u'd': 1}}}}
self.assertEqual(d, dchg)
def test_update(self):
    """update() deep-merges dchg into dnow, preserving untouched siblings."""
    # Disjoint top-level keys: both survive in the result.
    d_original = {'hello1': 1}
    dup = {'hello2': 2}
    d = self.nd.update(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello1': 1, 'hello2': 2})
    self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
    value = self.nd.get(keys=['hello2'], dnow=d)
    self.assertEqual(value, 2)
    # Same key: the change wins.
    d_original = {'hello': 'to_override'}
    dup = {'hello': 'over'}
    d = self.nd.update(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello': 'over'})
    # Nested merge: only the targeted leaf changes; sibling keys are kept.
    d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
    dup = {'hello': {'value': 'over'}}
    d = self.nd.update(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
    # NOTE(review): update() may mutate dnow in place -- this get() on
    # d_original passes either way; confirm against NestedDict.update.
    value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
    self.assertEqual(value, 1)
    # A scalar leaf can be replaced by an empty dict...
    d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
    dup = {'hello': {'value': {}}}
    dchg = self.nd.update(dchg=dup, dnow=d_original)
    self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
    # ...and a dict leaf by a scalar.
    d_original = {'hello': {'value': {}, 'no_change': 1}}
    dup = {'hello': {'value': 2}}
    dchg = self.nd.update(dchg=dup, dnow=d_original)
    self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
def test_merge_shallow(self):
    """merge_shallow() folds dchg into dnow; nested dicts are replaced whole."""
    # Two empty dicts merge to an empty dict.
    merged = self.nd.merge_shallow(dchg={}, dnow={})
    self.assertEqual(merged, {})
    # Disjoint top-level keys are combined.
    base = {'hello1': 1}
    change = {'hello2': 2}
    merged = self.nd.merge_shallow(dchg=change, dnow=base)
    self.assertEqual(merged, {'hello1': 1, 'hello2': 2})
    # Shallow semantics: the nested dict under 'hello' is replaced outright,
    # dropping 'no_change' instead of merging it.
    base = {'hello': {'value': 'to_override', 'no_change': 1}}
    change = {'hello': {'value': 'over'}}
    merged = self.nd.merge_shallow(dchg=change, dnow=base)
    self.assertEqual(merged, {'hello': {'value': 'over'}})
def test_update2(self):
    """update2() is exercised with the same deep-merge cases as update()."""
    # Disjoint top-level keys: both survive in the result.
    d_original = {'hello1': 1}
    dup = {'hello2': 2}
    d = self.nd.update2(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello1': 1, 'hello2': 2})
    self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
    value = self.nd.get(keys=['hello2'], dnow=d)
    self.assertEqual(value, 2)
    # Same key: the change wins.
    d_original = {'hello': 'to_override'}
    dup = {'hello': 'over'}
    d = self.nd.update2(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello': 'over'})
    # Nested merge: only the targeted leaf changes; sibling keys are kept.
    d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
    dup = {'hello': {'value': 'over'}}
    d = self.nd.update2(dchg=dup, dnow=d_original)
    self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
    value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
    self.assertEqual(value, 1)
    # A scalar leaf can be replaced by an empty dict...
    d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
    dup = {'hello': {'value': {}}}
    dchg = self.nd.update2(dchg=dup, dnow=d_original)
    self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
    # ...and a dict leaf by a scalar.
    d_original = {'hello': {'value': {}, 'no_change': 1}}
    dup = {'hello': {'value': 2}}
    dchg = self.nd.update2(dchg=dup, dnow=d_original)
    self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<|reserved_special_token_1|>
import unittest
import json
import os
import copy
from nested.nested_dict import NestedDict
from pprint import pprint
class TestNestedDict(unittest.TestCase):
    """Exercises NestedDict get/set/create/update/merge against a JSON fixture."""

    @classmethod
    def setUpClass(cls):
        # Fixtures: a tiny hand-built tree plus the packaged food sample.
        path = os.path.dirname(__file__)
        cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
        cls.nd = NestedDict()
        cls.d = {'a': {'b': {'c': 'C'}}}
        with open(cls.afile, 'r') as fp:
            cls.dfood = json.load(fp)

    def test_file(self):
        self.assertTrue(os.path.isfile(self.afile))

    def test_dfood(self):
        # Compare as a sorted list: dict key order is arbitrary, and on
        # Python 3 .keys() is a view that never equals a list, so the
        # previous direct comparison against a list was fragile.
        self.assertEqual(sorted(self.dfood.keys()), [u'0001', u'0002', u'0003'])

    def test_get(self):
        """get() walks a key path; set() grafts new branches at any depth."""
        v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
        self.assertEqual(v, 'C')

        # depth 0: whole path is new
        dc = copy.deepcopy(self.d)
        items = ['x', 'y', 'z']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 1: first key exists
        dc = copy.deepcopy(self.d)
        items = ['a', 'y', 'z']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 2: first two keys exist
        dc = copy.deepcopy(self.d)
        items = ['a', 'b', 'e']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
        self.assertEqual(v, 'E')

        # depth 3: full path exists, leaf is overwritten
        dc = copy.deepcopy(self.d)
        items = ['a', 'b', 'c']
        dchg = self.nd.set(value='E', keys=items, dnow=dc)
        v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
        self.assertEqual(v, 'E')

    def test_set(self):
        """set() writes a value at a key path, creating branches as needed."""
        # update the lastdict with new value of the same key
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='topless', keys=[u'0002', u'topping', u'5001', u'type'], dnow=dcopy)
        value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)
        self.assertEqual(value, {'id': '5001', 'type': 'topless'})

        # update the lastdict with new key: value, but not new dict
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001', 'price'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        self.assertEqual(value, {'id': '5001', 'type': u'None', 'price': '5.01'})

        # int key: starts a brand-new branch holding only the written leaf
        dcopy = copy.deepcopy(self.dfood)
        dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001', 'type'], dnow=dcopy)
        pprint(dchg)  # debug output
        argv = [35, 'topping', '5001']
        value = self.nd.get(keys=argv, dnow=dchg)
        self.assertEqual(value, {'type': 'topless'})

        # special condition value to be dict: node is replaced wholesale
        dcopy = copy.deepcopy(self.dfood)
        dnew = {'id': 555, 'type': 'berry', 'price': 0.99}
        dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        pprint(value)  # debug output
        self.assertEqual(value, dnew)

        # without id: existing keys survive and the new keys are merged in
        dcopy = copy.deepcopy(self.dfood)
        dnew = {'Type': 'berry', 'price': 0.99}
        dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
        value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
        self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': 0.99, u'type': u'None'})

    def test_create(self):
        """create() builds a fresh nested dict whose key path leads to value."""
        keys = ['a', 'b', 'c']
        value = {u'd': 1}
        d = self.nd.create(value=value, keys=keys)
        dchg = {'a': {'b': {'c': {u'd': 1}}}}
        self.assertEqual(d, dchg)

    def test_update(self):
        """update() deep-merges dchg into dnow, keeping untouched siblings."""
        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello1': 1, 'hello2': 2})

        # d_original did not change
        self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
        # dnow in parameters will be updated(!)
        # self.assertEqual(d_original.keys(), ['hello1'])

        value = self.nd.get(keys=['hello2'], dnow=d)
        self.assertEqual(value, 2)

        # same key: the change wins
        d_original = {'hello': 'to_override'}
        dup = {'hello': 'over'}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': 'over'})

        # nested merge: only the targeted leaf changes, siblings are kept
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}
        d = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
        value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
        self.assertEqual(value, 1)

        # a scalar leaf can be replaced by an empty dict...
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': {}}}
        dchg = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})

        # ...and a dict leaf by a scalar
        d_original = {'hello': {'value': {}, 'no_change': 1}}
        dup = {'hello': {'value': 2}}
        dchg = self.nd.update(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})

    def test_merge_shallow(self):
        """merge_shallow() replaces nested dicts whole instead of merging."""
        d = {}
        dchg = {}
        du = self.nd.merge_shallow(dchg=dchg, dnow=d)
        self.assertEqual(du, d)

        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
        self.assertEqual(du, {'hello1': 1, 'hello2': 2})

        # this is not shallow: 'no_change' is dropped, not merged
        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}

        d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over'}})

    def test_update2(self):
        """update2() is exercised with the same cases as update()."""
        d_original = {'hello1': 1}
        dup = {'hello2': 2}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello1': 1, 'hello2': 2})

        # d_original did not change
        self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
        # self.assertEqual(d_original.keys(), ['hello1'])

        value = self.nd.get(keys=['hello2'], dnow=d)
        self.assertEqual(value, 2)

        d_original = {'hello': 'to_override'}
        dup = {'hello': 'over'}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': 'over'})

        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': 'over'}}
        d = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})

        value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
        self.assertEqual(value, 1)

        d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
        dup = {'hello': {'value': {}}}
        dchg = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})

        d_original = {'hello': {'value': {}, 'no_change': 1}}
        dup = {'hello': {'value': 2}}
        dchg = self.nd.update2(dchg=dup, dnow=d_original)
        self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
|
flexible
|
{
"blob_id": "f9a255a464b5f48a1a8be2e2887db721a92e7f4e",
"index": 1474,
"step-1": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n <mask token>\n <mask token>\n <mask token>\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n <mask token>\n <mask token>\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 
'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n <mask token>\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-3": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n <mask token>\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n <mask token>\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 
'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n 
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-4": "<mask token>\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n\n def test_file(self):\n self.assertTrue(os.path.isfile(self.afile))\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n def test_set(self):\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[u'0002', u'topping',\n u'5001', u'type'], dnow=dcopy)\n value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': 'topless'})\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001',\n 'price'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': u'None', 'price':\n '5.01'})\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[35, 
'topping', '5001',\n 'type'], dnow=dcopy)\n pprint(dchg)\n argv = [35, 'topping', '5001']\n value = self.nd.get(keys=argv, dnow=dchg)\n self.assertEqual(value, {'type': 'topless'})\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'id': 555, 'type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],\n dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n pprint(value)\n self.assertEqual(value, dnew)\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'Type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],\n dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': \n 0.99, u'type': u'None'})\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = 
{'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-5": "import unittest\nimport json\nimport os\nimport copy\nfrom nested.nested_dict import NestedDict\nfrom pprint import pprint\n\n\nclass TestNestedDict(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n path = os.path.dirname(__file__)\n cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')\n cls.nd = NestedDict()\n cls.d = {'a': {'b': {'c': 'C'}}}\n with open(cls.afile, 'r') as fp:\n cls.dfood = json.load(fp)\n\n def test_file(self):\n self.assertTrue(os.path.isfile(self.afile))\n\n def test_dfood(self):\n self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])\n\n def test_get(self):\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)\n self.assertEqual(v, 'C')\n\n # depth 0\n dc = copy.deepcopy(self.d)\n items = ['x', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 1\n dc = copy.deepcopy(self.d)\n items = ['a', 'y', 'z']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 2\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'e']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n # depth 3\n dc = copy.deepcopy(self.d)\n items = ['a', 'b', 'c']\n dchg = self.nd.set(value='E', keys=items, dnow=dc)\n v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)\n self.assertEqual(v, 'E')\n\n def test_set(self):\n # update the lastdict with new value of the same key\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[u'0002', u'topping', u'5001', u'type'], dnow=dcopy)\n value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': 'topless'})\n\n # update the lastdict with new key: value, but not new dict\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='5.01', 
keys=['0002', 'topping', '5001', 'price'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {'id': '5001', 'type': u'None', 'price': '5.01'})\n\n # int key\n dcopy = copy.deepcopy(self.dfood)\n dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001', 'type'], dnow=dcopy)\n pprint(dchg)\n argv = [35, 'topping', '5001']\n value = self.nd.get(keys=argv, dnow=dchg)\n self.assertEqual(value, {'type': 'topless'})\n\n # special condition value to be dict\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'id': 555, 'type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)\n\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n pprint(value)\n self.assertEqual(value, dnew)\n\n # without id\n dcopy = copy.deepcopy(self.dfood)\n dnew = {'Type': 'berry', 'price': 0.99}\n dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)\n value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)\n self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': 0.99, u'type': u'None'})\n\n def test_create(self):\n keys = ['a', 'b', 'c']\n value = {u'd': 1}\n d = self.nd.create(value=value, keys=keys)\n dchg = {'a': {'b': {'c': {u'd': 1}}}}\n self.assertEqual(d, dchg)\n\n def test_update(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n\n # d_original did not change\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n # dnow in parameters will be updated(!)\n # self.assertEqual(d_original.keys(), ['hello1'])\n\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = 
{'hello': {'value': 'over'}}\n d = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n\n def test_merge_shallow(self):\n d = {}\n dchg = {}\n du = self.nd.merge_shallow(dchg=dchg, dnow=d)\n self.assertEqual(du, d)\n\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n du = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(du, {'hello1': 1, 'hello2': 2})\n\n # this is not shallow\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n\n d = self.nd.merge_shallow(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over'}})\n\n def test_update2(self):\n d_original = {'hello1': 1}\n dup = {'hello2': 2}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello1': 1, 'hello2': 2})\n\n # d_original did not change\n self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))\n # self.assertEqual(d_original.keys(), ['hello1'])\n\n value = self.nd.get(keys=['hello2'], dnow=d)\n self.assertEqual(value, 2)\n\n d_original = {'hello': 'to_override'}\n dup = {'hello': 'over'}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': 'over'})\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': 'over'}}\n d = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})\n\n value = 
self.nd.get(keys=['hello', 'no_change'], dnow=d_original)\n self.assertEqual(value, 1)\n\n d_original = {'hello': {'value': 'to_override', 'no_change': 1}}\n dup = {'hello': {'value': {}}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})\n\n d_original = {'hello': {'value': {}, 'no_change': 1}}\n dup = {'hello': {'value': 2}}\n dchg = self.nd.update2(dchg=dup, dnow=d_original)\n self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})\n",
"step-ids": [
3,
6,
8,
10,
12
]
}
|
[
3,
6,
8,
10,
12
] |
<|reserved_special_token_0|>
class GpTestCase(unittest.TestCase):
    """TestCase that tracks a set of mock patchers and stops them on teardown."""

    def __init__(self, methodName='runTest'):
        super(GpTestCase, self).__init__(methodName)
        # Patchers currently applied, and the mocks their start() calls produced.
        self.patches = []
        self.mock_objs = []

    def apply_patches(self, patches):
        """Start every patcher in *patches*; may only be called once per test."""
        if self.patches:
            raise Exception('Test class is already patched!')
        self.patches = patches
        started = []
        for patcher in self.patches:
            started.append(patcher.start())
        self.mock_objs = started

    def tearDown(self):
        """Stop every active patcher and drop the started mocks."""
        for patcher in self.patches:
            patcher.stop()
        self.mock_objs = []
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GpTestCase(unittest.TestCase):
    """unittest.TestCase base that manages mock patchers for its subclasses."""

    def __init__(self, methodName='runTest'):
        super(GpTestCase, self).__init__(methodName)
        # Patcher objects applied via apply_patches, and the mock objects
        # their start() calls returned (parallel lists).
        self.patches = []
        self.mock_objs = []

    def apply_patches(self, patches):
        """Start every patcher in `patches` and record the resulting mocks.

        Raises if patches were already applied without an intervening tearDown.
        """
        if self.patches:
            raise Exception('Test class is already patched!')
        self.patches = patches
        self.mock_objs = [p.start() for p in self.patches]

    def tearDown(self):
        """Stop all applied patchers and clear the recorded mocks."""
        # NOTE(review): self.patches is not cleared here, so re-applying
        # patches on this instance after tearDown would raise -- confirm
        # that unittest always builds a fresh instance per test.
        [p.stop() for p in self.patches]
        self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
<|reserved_special_token_1|>
import unittest2 as unittest
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
<|reserved_special_token_1|>
import unittest2 as unittest
class GpTestCase(unittest.TestCase):
def __init__(self, methodName='runTest'):
super(GpTestCase, self).__init__(methodName)
self.patches = []
self.mock_objs = []
def apply_patches(self, patches):
if self.patches:
raise Exception('Test class is already patched!')
self.patches = patches
self.mock_objs = [p.start() for p in self.patches]
# if you have a tearDown() in your test class,
# be sure to call this using super.tearDown()
def tearDown(self):
[p.stop() for p in self.patches]
self.mock_objs = []
def add_setup(setup=None, teardown=None):
"""decorate test functions to add additional setup/teardown contexts"""
def decorate_function(test):
def wrapper(self):
if setup:
setup(self)
test(self)
if teardown:
teardown(self)
return wrapper
return decorate_function
# hide unittest dependencies here
def run_tests():
unittest.main(verbosity=2, buffer=True)
skip = unittest.skip
|
flexible
|
{
"blob_id": "e9c88e18472281438783d29648c673aa08366abb",
"index": 1686,
"step-1": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\nskip = unittest.skip\n",
"step-4": "import unittest2 as unittest\n\n\nclass GpTestCase(unittest.TestCase):\n\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n\n def decorate_function(test):\n\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\n\nskip = unittest.skip\n",
"step-5": "import unittest2 as unittest\n\n\nclass GpTestCase(unittest.TestCase):\n def __init__(self, methodName='runTest'):\n super(GpTestCase, self).__init__(methodName)\n self.patches = []\n self.mock_objs = []\n\n def apply_patches(self, patches):\n if self.patches:\n raise Exception('Test class is already patched!')\n self.patches = patches\n self.mock_objs = [p.start() for p in self.patches]\n\n # if you have a tearDown() in your test class,\n # be sure to call this using super.tearDown()\n def tearDown(self):\n [p.stop() for p in self.patches]\n self.mock_objs = []\n\ndef add_setup(setup=None, teardown=None):\n \"\"\"decorate test functions to add additional setup/teardown contexts\"\"\"\n def decorate_function(test):\n def wrapper(self):\n if setup:\n setup(self)\n test(self)\n if teardown:\n teardown(self)\n return wrapper\n return decorate_function\n\n# hide unittest dependencies here\ndef run_tests():\n unittest.main(verbosity=2, buffer=True)\n\nskip = unittest.skip\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_register_new_accont(self):
cos = self.cos
cos.get('https://wizzair.com/pl-pl#/')
cos.find_elements_by_class_name(
'navigation__button navigation__button--simple').click()
cos.find_elements_by_class_name('content__link1').click()
cos.find_elemebts_by_name('firstName').click()
cos.find_elemebts_by_name('firstName').clear()
cos.find_elemebts_by_name('firstName').send_keys('Jonasz')
cos.find_elemebts_by_name('lastName').click()
cos.find_elemebts_by_name('lastName').clear()
cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')
cos.find_elements_by_class_name('rf-switch__label').click()
cos.find_elemebts_by_name('mobilePhone').click()
cos.find_elemebts_by_name('mobilePhone').clear()
cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')
cos.find_elemebts_by_name('email').click()
cos.find_elemebts_by_name('email').clear()
cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')
cos.find_elemebts_by_name('password').click()
cos.find_elemebts_by_name('password').clear()
cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')
cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'
).click()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_register_new_accont(self):
cos = self.cos
cos.get('https://wizzair.com/pl-pl#/')
cos.find_elements_by_class_name(
'navigation__button navigation__button--simple').click()
cos.find_elements_by_class_name('content__link1').click()
cos.find_elemebts_by_name('firstName').click()
cos.find_elemebts_by_name('firstName').clear()
cos.find_elemebts_by_name('firstName').send_keys('Jonasz')
cos.find_elemebts_by_name('lastName').click()
cos.find_elemebts_by_name('lastName').clear()
cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')
cos.find_elements_by_class_name('rf-switch__label').click()
cos.find_elemebts_by_name('mobilePhone').click()
cos.find_elemebts_by_name('mobilePhone').clear()
cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')
cos.find_elemebts_by_name('email').click()
cos.find_elemebts_by_name('email').clear()
cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')
cos.find_elemebts_by_name('password').click()
cos.find_elemebts_by_name('password').clear()
cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')
cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'
).click()
<|reserved_special_token_1|>
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_register_new_accont(self):
cos = self.cos
cos.get("https://wizzair.com/pl-pl#/")
cos.find_elements_by_class_name('navigation__button navigation__button--simple').click()
cos.find_elements_by_class_name('content__link1').click()
cos.find_elemebts_by_name('firstName').click()
cos.find_elemebts_by_name('firstName').clear()
cos.find_elemebts_by_name('firstName').send_keys("Jonasz")
cos.find_elemebts_by_name('lastName').click()
cos.find_elemebts_by_name('lastName').clear()
cos.find_elemebts_by_name('lastName').send_keys("Zsanoj")
cos.find_elements_by_class_name('rf-switch__label').click()
cos.find_elemebts_by_name('mobilePhone').click()
cos.find_elemebts_by_name('mobilePhone').clear()
cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')
cos.find_elemebts_by_name('email').click()
cos.find_elemebts_by_name('email').clear()
cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')
cos.find_elemebts_by_name('password').click()
cos.find_elemebts_by_name('password').clear()
cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')
cos.find_elements_by_class_name('rf-input__input rf-input__input--empty').click()
|
flexible
|
{
"blob_id": "6efd22feb4f96de74633276b1ec8550f8d853075",
"index": 2657,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get('https://wizzair.com/pl-pl#/')\n cos.find_elements_by_class_name(\n 'navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys('Jonasz')\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'\n ).click()\n",
"step-3": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get('https://wizzair.com/pl-pl#/')\n cos.find_elements_by_class_name(\n 'navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys('Jonasz')\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys('Zsanoj')\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty'\n ).click()\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndef test_register_new_accont(self):\n cos = self.cos\n cos.get(\"https://wizzair.com/pl-pl#/\")\n cos.find_elements_by_class_name('navigation__button navigation__button--simple').click()\n cos.find_elements_by_class_name('content__link1').click()\n cos.find_elemebts_by_name('firstName').click()\n cos.find_elemebts_by_name('firstName').clear()\n cos.find_elemebts_by_name('firstName').send_keys(\"Jonasz\")\n cos.find_elemebts_by_name('lastName').click()\n cos.find_elemebts_by_name('lastName').clear()\n cos.find_elemebts_by_name('lastName').send_keys(\"Zsanoj\")\n cos.find_elements_by_class_name('rf-switch__label').click()\n cos.find_elemebts_by_name('mobilePhone').click()\n cos.find_elemebts_by_name('mobilePhone').clear()\n cos.find_elemebts_by_name('mobilePhone').send_keys('71661234567')\n cos.find_elemebts_by_name('email').click()\n cos.find_elemebts_by_name('email').clear()\n cos.find_elemebts_by_name('email').send_keys('Jonasz.Zsanoj@gmail.cooooom')\n cos.find_elemebts_by_name('password').click()\n cos.find_elemebts_by_name('password').clear()\n cos.find_elemebts_by_name('password').send_keys('zaq1@WSX')\n cos.find_elements_by_class_name('rf-input__input rf-input__input--empty').click()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def click(x, y):
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def click(x, y):
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
while keyboard.is_pressed('s') == False:
if pyautogui.pixel(xPosition, yPosition)[0] == 0:
click(xPosition, yPosition)
<|reserved_special_token_1|>
import pyautogui
import keyboard
import win32api
import win32con
import time
def click(x, y):
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
while keyboard.is_pressed('s') == False:
if pyautogui.pixel(xPosition, yPosition)[0] == 0:
click(xPosition, yPosition)
<|reserved_special_token_1|>
# The purpose of this bot is to cick the first black pixel.
# Testing a change here done by Git.
# changes through branches
import pyautogui
import keyboard
import win32api
import win32con
import time
# click function, with a 0.01 pause inorder to properly run the script
def click(x, y):
win32api.SetCursorPos((x, y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
# pressing 's' to stop the function
while keyboard.is_pressed('s') == False:
# If the pixel is black (0), click on that pixel
if pyautogui.pixel(xPosition, yPosition)[0] == 0:
click(xPosition, yPosition)
|
flexible
|
{
"blob_id": "9f831b8c90dd428879319b63712bd03fcc01b631",
"index": 8212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\nwhile keyboard.is_pressed('s') == False:\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\n click(xPosition, yPosition)\n",
"step-4": "import pyautogui\nimport keyboard\nimport win32api\nimport win32con\nimport time\n\n\ndef click(x, y):\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n time.sleep(0.01)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n\nwhile keyboard.is_pressed('s') == False:\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\n click(xPosition, yPosition)\n",
"step-5": "# The purpose of this bot is to cick the first black pixel.\r\n# Testing a change here done by Git. \r\n# changes through branches\r\n\r\nimport pyautogui\r\nimport keyboard\r\nimport win32api\r\nimport win32con\r\nimport time\r\n\r\n# click function, with a 0.01 pause inorder to properly run the script\r\n\r\n\r\ndef click(x, y):\r\n win32api.SetCursorPos((x, y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n time.sleep(0.01)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n\r\n\r\n# pressing 's' to stop the function\r\n\r\nwhile keyboard.is_pressed('s') == False:\r\n\r\n # If the pixel is black (0), click on that pixel\r\n\r\n if pyautogui.pixel(xPosition, yPosition)[0] == 0:\r\n click(xPosition, yPosition)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from bbdd import *
def usuario():
global usser
usser=input("Introduce un usuario : ")
if len(usser)<5 or len(usser)>15:
print("El usuario debe tener entre 5 y 15 caracteres")
usuario()
elif usser.isalnum()==False:
print("Los valores del usurio deben ser únicamente letras o números")
usuario()
else:
print(True)
def contraseña():
global passw
passw=input("Introduce contraseña: ")
if len(passw)<=9:
print("La contraseña debe tener al menos 10 caractéres")
contraseña()
elif passw.isalnum()==True:
print ("La contraseña debe tener al menos un carácter no alfanumérico")
contraseña()
elif passw.lower() == passw:
print("Debe haber por lo menos una mayúscula")
contraseña()
elif passw.upper()==passw:
print("Debe haber por lo menos una minúscula")
contraseña()
for i in passw:
if i==" ":
print("La contraseña no debe tener espacios en blanco")
contraseña()
print(True)
|
normal
|
{
"blob_id": "ce75c23c6b0862dde797225f53c900b4ebc56428",
"index": 514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"step-4": "from bbdd import *\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"step-5": "from bbdd import *\n\n\ndef usuario():\n global usser\n usser=input(\"Introduce un usuario : \")\n if len(usser)<5 or len(usser)>15:\n print(\"El usuario debe tener entre 5 y 15 caracteres\")\n usuario()\n elif usser.isalnum()==False:\n print(\"Los valores del usurio deben ser únicamente letras o números\")\n usuario()\n else:\n print(True)\n\n\n\ndef contraseña():\n global passw\n passw=input(\"Introduce contraseña: \")\n if len(passw)<=9:\n print(\"La contraseña debe tener al menos 10 caractéres\")\n contraseña()\n elif passw.isalnum()==True:\n print (\"La contraseña debe tener al menos un carácter no alfanumérico\")\n contraseña()\n elif passw.lower() == passw:\n print(\"Debe haber por lo menos una mayúscula\")\n contraseña()\n elif passw.upper()==passw:\n print(\"Debe haber por lo menos una minúscula\")\n contraseña()\n\n for i in passw:\n if i==\" \":\n print(\"La contraseña no debe tener espacios en blanco\")\n contraseña()\n print(True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_test(file_name, capture_stdout=True, allure_dir=None):
cmd = [file_name, '-vvv']
if capture_stdout:
cmd.append('-s')
test_name = os.path.splitext(os.path.basename(file_name))[0]
alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',
test_name))
cmd.extend(['--alluredir', alluredir])
print(cmd)
sys.exit(pytest.main(cmd))
<|reserved_special_token_1|>
import os
import sys
import pytest
def run_test(file_name, capture_stdout=True, allure_dir=None):
cmd = [file_name, '-vvv']
if capture_stdout:
cmd.append('-s')
test_name = os.path.splitext(os.path.basename(file_name))[0]
alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',
test_name))
cmd.extend(['--alluredir', alluredir])
print(cmd)
sys.exit(pytest.main(cmd))
<|reserved_special_token_1|>
import os
import sys
import pytest
def run_test(file_name, capture_stdout=True, allure_dir=None):
cmd = [
file_name, "-vvv",
]
if capture_stdout:
cmd.append("-s")
test_name = os.path.splitext(os.path.basename(file_name))[0]
alluredir = os.path.normpath("%s/%s/" % (allure_dir or "allure-results", test_name))
cmd.extend(["--alluredir", alluredir])
print(cmd)
sys.exit(pytest.main(cmd))
|
flexible
|
{
"blob_id": "7e7a50cb8e66a71c1df2d61241f8a55c042b7d59",
"index": 2664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [file_name, '-vvv']\n if capture_stdout:\n cmd.append('-s')\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',\n test_name))\n cmd.extend(['--alluredir', alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-3": "import os\nimport sys\nimport pytest\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [file_name, '-vvv']\n if capture_stdout:\n cmd.append('-s')\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',\n test_name))\n cmd.extend(['--alluredir', alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-4": "import os\nimport sys\nimport pytest\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [\n file_name, \"-vvv\",\n ]\n\n if capture_stdout:\n cmd.append(\"-s\")\n\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath(\"%s/%s/\" % (allure_dir or \"allure-results\", test_name))\n cmd.extend([\"--alluredir\", alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#2) write a program to make banking system develop business logic
#in one module and call functionality in another .py file
class Customer: #user defined class
def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments
self._name=name
self._pno=phoneno
self._add=address
self._pin=pin
self._acc=accno
self._bal=balance#protected variable
def add(self) : #user defined method
self._d={} #create empty dictionary
self._d['CustomerName']=self._name #add values to the dictionary using key names
self._d['CustomerPhonenumber']=self._pno
self._d['CustomerAddress']=self._add
self._d['CustomerPin']=self._pin
self._d['CustomerAccountNumber']=self._acc
self._d['CustomerBalance']=self._bal
print('Customer Details Add Successfully')
def deposit(self):
amt=int(input('Enter Deposit amount : '))
self._d['CustomerBalance']+=amt
print('Your a/c is credited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def withdraw(self):
amt=int(input('Enter Withdraw amount : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
def transfer(self):
name=input('Enter Recipient name : ')
acc=input('Enter account number : ')
if len(acc)==16:
amt=int(input('Enter amount to transfer : '))
if amt>self._d['CustomerBalance'] :
print('Insufficient Balance')
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
self._d['CustomerBalance']-=amt
print('Transfer amount successfully')
print('Your a/c is debited for INR ',amt)
print('Account Balance is ',self._d['CustomerBalance'])
print()
else:
print('Invalid Account Number\n')
def mini(self):
print('Name : ',self._d['CustomerName'])
print('Account Balance is ',self._d['CustomerBalance'])
print()
def __del__(self): #destructor
print('Thank You')
pass
|
normal
|
{
"blob_id": "cf5a9b8dad5a02610fa5ce2a849b6f9fc50a0aa8",
"index": 1872,
"step-1": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n <mask token>\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-2": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n <mask token>\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-3": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n <mask token>\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-4": "class Customer:\n\n def __init__(self, name, phoneno, address, pin, accno, balance):\n self._name = name\n self._pno = phoneno\n self._add = address\n self._pin = pin\n self._acc = accno\n self._bal = balance\n\n def add(self):\n self._d = {}\n self._d['CustomerName'] = self._name\n self._d['CustomerPhonenumber'] = self._pno\n self._d['CustomerAddress'] = self._add\n self._d['CustomerPin'] = self._pin\n self._d['CustomerAccountNumber'] = self._acc\n self._d['CustomerBalance'] = self._bal\n print('Customer Details Add Successfully')\n\n def deposit(self):\n amt = int(input('Enter Deposit amount : '))\n self._d['CustomerBalance'] += amt\n print('Your a/c is credited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def withdraw(self):\n amt = int(input('Enter Withdraw amount : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def transfer(self):\n name = input('Enter Recipient name : ')\n acc = input('Enter account number : ')\n if len(acc) == 16:\n amt = int(input('Enter amount to transfer : '))\n if amt > self._d['CustomerBalance']:\n print('Insufficient Balance')\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n self._d['CustomerBalance'] -= amt\n print('Transfer amount successfully')\n print('Your a/c is debited for INR ', amt)\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n else:\n print('Invalid Account Number\\n')\n\n def mini(self):\n print('Name : ', self._d['CustomerName'])\n print('Account Balance is ', self._d['CustomerBalance'])\n print()\n\n def __del__(self):\n print('Thank You')\n pass\n",
"step-5": "#2) write a program to make banking system develop business logic\r\n#in one module and call functionality in another .py file\r\n\r\nclass Customer: #user defined class\r\n def __init__(self,name,phoneno,address,pin,accno,balance) : #constructor with multiple arguments\r\n self._name=name \r\n self._pno=phoneno\r\n self._add=address\r\n self._pin=pin\r\n self._acc=accno\r\n self._bal=balance#protected variable\r\n def add(self) : #user defined method\r\n self._d={} #create empty dictionary\r\n self._d['CustomerName']=self._name #add values to the dictionary using key names\r\n self._d['CustomerPhonenumber']=self._pno\r\n self._d['CustomerAddress']=self._add\r\n self._d['CustomerPin']=self._pin\r\n self._d['CustomerAccountNumber']=self._acc\r\n self._d['CustomerBalance']=self._bal\r\n print('Customer Details Add Successfully')\r\n def deposit(self):\r\n amt=int(input('Enter Deposit amount : '))\r\n self._d['CustomerBalance']+=amt\r\n print('Your a/c is credited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def withdraw(self):\r\n amt=int(input('Enter Withdraw amount : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def transfer(self):\r\n name=input('Enter Recipient name : ')\r\n acc=input('Enter account number : ')\r\n if len(acc)==16:\r\n amt=int(input('Enter amount to transfer : '))\r\n if amt>self._d['CustomerBalance'] :\r\n print('Insufficient Balance')\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n self._d['CustomerBalance']-=amt\r\n print('Transfer amount successfully')\r\n print('Your a/c is debited for INR ',amt)\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n else:\r\n 
print('Invalid Account Number\\n')\r\n def mini(self):\r\n print('Name : ',self._d['CustomerName'])\r\n print('Account Balance is ',self._d['CustomerBalance'])\r\n print()\r\n def __del__(self): #destructor\r\n print('Thank You')\r\n pass\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
my_dict = {'one': '1', 'two': '2'}
for key in my_dict:
print('{} - {}'.format(key, my_dict[key]))
|
normal
|
{
"blob_id": "1d524312cbd3b735850046131f31c03fdfa90bbc",
"index": 483,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key in my_dict:\n print('{} - {}'.format(key, my_dict[key]))\n",
"step-3": "my_dict = {'one': '1', 'two': '2'}\nfor key in my_dict:\n print('{} - {}'.format(key, my_dict[key]))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# KMeans
# 参考 https://qiita.com/g-k/items/0d5d22a12a4507ecbf11
#
# データを適当なクラスタに分けた後、クラスタの平均を用いてうまい具合にデータがわかれるように調整させていくアルゴリズム
# 任意の指定のk個のクラスタを作成するアルゴリズムであることから、k-means法(k点平均法と呼ばれている)
# k-meansの初期値選択の弱点を解消したのが、k-means++
# k-means++では、中心点が互いに遠いところに配置されるような確率が高くなるように操作する。
# 教師なし学習のアルゴリズム
# 主に正解ラベルの無いベクトル形式のデータをクラスタリングするのに用いられる。
# 1 1つ目の中心点を、データ点の中から均等な確率でランダムに選ぶ。
# 2 残り全てのデータ点について、既存の中心点との距離の2乗を計算して足し合わせる。
# 3 2.の結果を合計した値で、それぞれの距離の2乗を割る。
# 4 3.の結果を新たな確率として、2つ目の中心点を選ぶ。
# 5 2.~4.を、クラスター数と同じ数の中心点が出来るまで繰り返す。
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
# 入力:データ、クラスター数、中心点の初期値、繰り返し回数
# 出力:各クラスターの中心点、各データ点の属するクラスター番号
def kmeansplus(X,K,n_iter):
n = X.shape[0]
idx = np.zeros(X.shape[0])
distance = np.zeros(n*K).reshape(n,K)
centers = np.zeros(X.shape[1]*K).reshape(K,-1)
#最初の確率は均等
pr = np.repeat(1/n,n)
#1つ目の中心点はランダムに選ぶ
centers[0,:] = X[np.random.choice(np.arange(n),1,p=pr),]
distance[:,0] = np.sum((X-centers[0,:])**2,axis=1)
for k in np.arange(1,K):
pr = np.sum(distance,axis=1)/np.sum(distance)
centers[k,:] = X[np.random.choice(np.arange(n),1,p=pr),]
distance[:,k] = np.sum((X-centers[k,:])**2,axis=1)
for _ in range(n_iter):
#データ点と中心点の距離を計算し、一番近い中心点のインデックス(クラスター番号)を返す。
for i in range(X.shape[0]):
idx[i] = np.argmin(np.sum((X[i,:] - centers)**2,axis=1))
#重心を計算して中心点を移動させる
for k in range(K):
centers[k,:] = X[idx==k,:].mean(axis=0)
return idx,centers
def main():
# サンプルとして、4種類の2次元正規乱数に従う点を各20個ずつ、計80個生成した。
# データは以下のように散らばっている
#データの生成
np.random.seed(123)
x1 = np.r_[np.random.normal(size=20,loc=1,scale=2),np.random.normal(size=20,loc=8,scale=2)
,np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=25,scale=2)]
x2 = np.r_[np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=1,scale=2)
,np.random.normal(size=20,loc=20,scale=2),np.random.normal(size=20,loc=0,scale=2)]
X = np.c_[x1,x2]
#可視化
plt.figure(figsize=(6,6))
plt.scatter(X[:,0],X[:,1],c="black",s=10,alpha=0.5)
plt.show()
# k-means法で4グループにクラスタリングしてみる。
# 簡単のため、繰り返し回数は4回とする。
K=4
centers = np.array([[0,5],[5,0],[10,15],[20,10]])
inter = 9
idx, centers = kmeansplus(X,K,inter)
data = pd.DataFrame(X,columns=["X","Y"])
data["idx"] = idx
data0 = data[data.idx==0]
data1 = data[data.idx==1]
data2 = data[data.idx==2]
data3 = data[data.idx==3]
plt.figure(figsize=(6,6))
plt.scatter(data0.X,data0.Y,color="r",s=10,alpha=0.5)
plt.scatter(data1.X,data1.Y,color="b",s=10,alpha=0.5)
plt.scatter(data2.X,data2.Y,color="g",s=10,alpha=0.5)
plt.scatter(data3.X,data3.Y,color="orange",s=10,alpha=0.5)
plt.scatter(centers[:,0],centers[:,1],color=["r","b","g","orange"])
plt.show()
plt.show()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "10bf7959f178d3b5c0ce6e97253e665d32363af7",
"index": 6015,
"step-1": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\nplt.show()\nif 
__name__ == '__main__':\n main()\n",
"step-4": "import matplotlib.font_manager as fm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.model_selection import train_test_split\n\n\ndef kmeansplus(X, K, n_iter):\n n = X.shape[0]\n idx = np.zeros(X.shape[0])\n distance = np.zeros(n * K).reshape(n, K)\n centers = np.zeros(X.shape[1] * K).reshape(K, -1)\n pr = np.repeat(1 / n, n)\n centers[0, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, 0] = np.sum((X - centers[0, :]) ** 2, axis=1)\n for k in np.arange(1, K):\n pr = np.sum(distance, axis=1) / np.sum(distance)\n centers[k, :] = X[np.random.choice(np.arange(n), 1, p=pr),]\n distance[:, k] = np.sum((X - centers[k, :]) ** 2, axis=1)\n for _ in range(n_iter):\n for i in range(X.shape[0]):\n idx[i] = np.argmin(np.sum((X[i, :] - centers) ** 2, axis=1))\n for k in range(K):\n centers[k, :] = X[idx == k, :].mean(axis=0)\n return idx, centers\n\n\ndef main():\n np.random.seed(123)\n x1 = np.r_[np.random.normal(size=20, loc=1, scale=2), np.random.normal(\n size=20, loc=8, scale=2), np.random.normal(size=20, loc=15, scale=2\n ), np.random.normal(size=20, loc=25, scale=2)]\n x2 = np.r_[np.random.normal(size=20, loc=15, scale=2), np.random.normal\n (size=20, loc=1, scale=2), np.random.normal(size=20, loc=20, scale=\n 2), np.random.normal(size=20, loc=0, scale=2)]\n X = np.c_[x1, x2]\n plt.figure(figsize=(6, 6))\n plt.scatter(X[:, 0], X[:, 1], c='black', s=10, alpha=0.5)\n plt.show()\n K = 4\n centers = np.array([[0, 5], [5, 0], [10, 15], [20, 10]])\n inter = 9\n idx, centers = kmeansplus(X, K, inter)\n data = pd.DataFrame(X, columns=['X', 'Y'])\n data['idx'] = idx\n data0 = data[data.idx == 0]\n data1 = data[data.idx == 1]\n data2 = data[data.idx == 2]\n data3 = data[data.idx == 3]\n plt.figure(figsize=(6, 6))\n plt.scatter(data0.X, data0.Y, color='r', s=10, alpha=0.5)\n plt.scatter(data1.X, data1.Y, color='b', s=10, 
alpha=0.5)\n plt.scatter(data2.X, data2.Y, color='g', s=10, alpha=0.5)\n plt.scatter(data3.X, data3.Y, color='orange', s=10, alpha=0.5)\n plt.scatter(centers[:, 0], centers[:, 1], color=['r', 'b', 'g', 'orange'])\n plt.show()\n\n\nplt.show()\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\r\n\r\n# KMeans\r\n# 参考 https://qiita.com/g-k/items/0d5d22a12a4507ecbf11\r\n# \r\n# データを適当なクラスタに分けた後、クラスタの平均を用いてうまい具合にデータがわかれるように調整させていくアルゴリズム\r\n# 任意の指定のk個のクラスタを作成するアルゴリズムであることから、k-means法(k点平均法と呼ばれている)\r\n\r\n# k-meansの初期値選択の弱点を解消したのが、k-means++\r\n# k-means++では、中心点が互いに遠いところに配置されるような確率が高くなるように操作する。\r\n\r\n# 教師なし学習のアルゴリズム\r\n# 主に正解ラベルの無いベクトル形式のデータをクラスタリングするのに用いられる。\r\n# 1 1つ目の中心点を、データ点の中から均等な確率でランダムに選ぶ。\r\n# 2 残り全てのデータ点について、既存の中心点との距離の2乗を計算して足し合わせる。\r\n# 3 2.の結果を合計した値で、それぞれの距離の2乗を割る。\r\n# 4 3.の結果を新たな確率として、2つ目の中心点を選ぶ。\r\n# 5 2.~4.を、クラスター数と同じ数の中心点が出来るまで繰り返す。\r\n\r\nimport matplotlib.font_manager as fm\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom matplotlib import cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\n# 入力:データ、クラスター数、中心点の初期値、繰り返し回数\r\n# 出力:各クラスターの中心点、各データ点の属するクラスター番号\r\ndef kmeansplus(X,K,n_iter):\r\n n = X.shape[0]\r\n idx = np.zeros(X.shape[0])\r\n distance = np.zeros(n*K).reshape(n,K)\r\n centers = np.zeros(X.shape[1]*K).reshape(K,-1)\r\n\r\n #最初の確率は均等\r\n pr = np.repeat(1/n,n)\r\n #1つ目の中心点はランダムに選ぶ\r\n centers[0,:] = X[np.random.choice(np.arange(n),1,p=pr),]\r\n distance[:,0] = np.sum((X-centers[0,:])**2,axis=1)\r\n \r\n for k in np.arange(1,K):\r\n pr = np.sum(distance,axis=1)/np.sum(distance)\r\n centers[k,:] = X[np.random.choice(np.arange(n),1,p=pr),]\r\n distance[:,k] = np.sum((X-centers[k,:])**2,axis=1)\r\n \r\n for _ in range(n_iter):\r\n #データ点と中心点の距離を計算し、一番近い中心点のインデックス(クラスター番号)を返す。\r\n for i in range(X.shape[0]):\r\n idx[i] = np.argmin(np.sum((X[i,:] - centers)**2,axis=1))\r\n #重心を計算して中心点を移動させる\r\n for k in range(K):\r\n centers[k,:] = X[idx==k,:].mean(axis=0)\r\n\r\n return idx,centers\r\n\r\ndef main():\r\n # サンプルとして、4種類の2次元正規乱数に従う点を各20個ずつ、計80個生成した。\r\n # データは以下のように散らばっている\r\n\r\n #データの生成\r\n np.random.seed(123)\r\n x1 = 
np.r_[np.random.normal(size=20,loc=1,scale=2),np.random.normal(size=20,loc=8,scale=2)\r\n ,np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=25,scale=2)]\r\n x2 = np.r_[np.random.normal(size=20,loc=15,scale=2),np.random.normal(size=20,loc=1,scale=2)\r\n ,np.random.normal(size=20,loc=20,scale=2),np.random.normal(size=20,loc=0,scale=2)]\r\n X = np.c_[x1,x2]\r\n\r\n #可視化\r\n plt.figure(figsize=(6,6))\r\n plt.scatter(X[:,0],X[:,1],c=\"black\",s=10,alpha=0.5)\r\n plt.show()\r\n\r\n # k-means法で4グループにクラスタリングしてみる。\r\n # 簡単のため、繰り返し回数は4回とする。\r\n K=4\r\n centers = np.array([[0,5],[5,0],[10,15],[20,10]])\r\n inter = 9\r\n\r\n idx, centers = kmeansplus(X,K,inter)\r\n\r\n data = pd.DataFrame(X,columns=[\"X\",\"Y\"])\r\n data[\"idx\"] = idx\r\n\r\n data0 = data[data.idx==0]\r\n data1 = data[data.idx==1]\r\n data2 = data[data.idx==2]\r\n data3 = data[data.idx==3]\r\n\r\n plt.figure(figsize=(6,6))\r\n plt.scatter(data0.X,data0.Y,color=\"r\",s=10,alpha=0.5)\r\n plt.scatter(data1.X,data1.Y,color=\"b\",s=10,alpha=0.5)\r\n plt.scatter(data2.X,data2.Y,color=\"g\",s=10,alpha=0.5)\r\n plt.scatter(data3.X,data3.Y,color=\"orange\",s=10,alpha=0.5)\r\n plt.scatter(centers[:,0],centers[:,1],color=[\"r\",\"b\",\"g\",\"orange\"]) \r\n plt.show()\r\n\r\n\r\nplt.show()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hiddenimports = ['sklearn.utils.sparsetools._graph_validation',
'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',
'sklearn.utils.weight_vector']
datas = collect_data_files('sklearn')
<|reserved_special_token_1|>
from PyInstaller.utils.hooks import collect_data_files
hiddenimports = ['sklearn.utils.sparsetools._graph_validation',
'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',
'sklearn.utils.weight_vector']
datas = collect_data_files('sklearn')
|
flexible
|
{
"blob_id": "12396130dc52866cc54d6dc701cf0f9a41a168b6",
"index": 8351,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhiddenimports = ['sklearn.utils.sparsetools._graph_validation',\n 'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',\n 'sklearn.utils.weight_vector']\ndatas = collect_data_files('sklearn')\n",
"step-3": "from PyInstaller.utils.hooks import collect_data_files\nhiddenimports = ['sklearn.utils.sparsetools._graph_validation',\n 'sklearn.utils.sparsetools._graph_tools', 'sklearn.utils.lgamma',\n 'sklearn.utils.weight_vector']\ndatas = collect_data_files('sklearn')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if x == 'Y' or x == 'N' or x == 'Q':
while x == 'Y' or x == 'N' or x == 'Q':
if x == 'Q':
print('Exiting the Program')
import sys
sys.exit()
elif x == 'N':
print('You decided to leave. See you again!” ')
break
else:
print('Invalid selection is entered')
<|reserved_special_token_1|>
x = input('Do you really want to run this program? (y/n) : ')
x = x.upper()
if x == 'Y' or x == 'N' or x == 'Q':
while x == 'Y' or x == 'N' or x == 'Q':
if x == 'Q':
print('Exiting the Program')
import sys
sys.exit()
elif x == 'N':
print('You decided to leave. See you again!” ')
break
else:
print('Invalid selection is entered')
<|reserved_special_token_1|>
x=input("Do you really want to run this program? (y/n) : ")
x=x.upper()
if x=="Y" or x=="N" or x=="Q":
while x=="Y" or x=="N" or x=="Q":
if x=="Q":
print("Exiting the Program")
import sys
sys.exit()
elif x=="N":
print("You decided to leave. See you again!” ")
break
#elif x=="Y":
#You can run the program.Enter the code required to run the program
else:
print("Invalid selection is entered")
|
flexible
|
{
"blob_id": "7dff15a16ecc3ce3952f4b47290393ea3183807f",
"index": 4414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif x == 'Y' or x == 'N' or x == 'Q':\n while x == 'Y' or x == 'N' or x == 'Q':\n if x == 'Q':\n print('Exiting the Program')\n import sys\n sys.exit()\n elif x == 'N':\n print('You decided to leave. See you again!” ')\n break\nelse:\n print('Invalid selection is entered')\n",
"step-3": "x = input('Do you really want to run this program? (y/n) : ')\nx = x.upper()\nif x == 'Y' or x == 'N' or x == 'Q':\n while x == 'Y' or x == 'N' or x == 'Q':\n if x == 'Q':\n print('Exiting the Program')\n import sys\n sys.exit()\n elif x == 'N':\n print('You decided to leave. See you again!” ')\n break\nelse:\n print('Invalid selection is entered')\n",
"step-4": "x=input(\"Do you really want to run this program? (y/n) : \")\nx=x.upper()\n\nif x==\"Y\" or x==\"N\" or x==\"Q\":\n while x==\"Y\" or x==\"N\" or x==\"Q\":\n if x==\"Q\":\n print(\"Exiting the Program\")\n import sys\n sys.exit()\n elif x==\"N\":\n print(\"You decided to leave. See you again!” \")\n break\n #elif x==\"Y\":\n #You can run the program.Enter the code required to run the program\nelse:\n print(\"Invalid selection is entered\") \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template, redirect, url_for, flash
import subprocess
def check_output(*args):
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
mod = Blueprint('system', __name__)
@mod.route('/')
def index():
uptime = check_output(["uptime"])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash("Shutting down.<br>When the LEDs on the board stop flashing, \
it should be safe to unplug your Raspberry Pi.")
subprocess.call(["sudo", "halt"])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash("Rebooting... please wait.<br>This will take approx. one minute.")
subprocess.call(["sudo", "reboot"])
return redirect(url_for('system.index'))
|
normal
|
{
"blob_id": "e056a1600b620519e729c597dcec57793284019a",
"index": 1470,
"step-1": "<mask token>\n\n\n@mod.route('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\n<mask token>\n\n\n@mod.route('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\n@mod.route('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\n@mod.route('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-3": "<mask token>\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\nmod = Blueprint('system', __name__)\n\n\n@mod.route('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\n@mod.route('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\n@mod.route('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-4": "from flask import Blueprint, render_template, redirect, url_for, flash\nimport subprocess\n\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\n\nmod = Blueprint('system', __name__)\n\n\n@mod.route('/')\ndef index():\n uptime = check_output(['uptime'])\n return render_template('system/system.html', uptime=uptime)\n\n\n@mod.route('/shutdown')\ndef shutdown():\n flash(\n 'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'\n )\n subprocess.call(['sudo', 'halt'])\n return redirect(url_for('system.index'))\n\n\n@mod.route('/reboot')\ndef reboot():\n flash('Rebooting... please wait.<br>This will take approx. one minute.')\n subprocess.call(['sudo', 'reboot'])\n return redirect(url_for('system.index'))\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom flask import Blueprint, render_template, redirect, url_for, flash\nimport subprocess\n\ndef check_output(*args):\n return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]\n\nmod = Blueprint('system', __name__)\n\n@mod.route('/')\ndef index():\n uptime = check_output([\"uptime\"])\n return render_template('system/system.html', uptime=uptime)\n\n@mod.route('/shutdown')\ndef shutdown():\n flash(\"Shutting down.<br>When the LEDs on the board stop flashing, \\\n it should be safe to unplug your Raspberry Pi.\")\n subprocess.call([\"sudo\", \"halt\"])\n return redirect(url_for('system.index'))\n\n@mod.route('/reboot')\ndef reboot():\n flash(\"Rebooting... please wait.<br>This will take approx. one minute.\")\n subprocess.call([\"sudo\", \"reboot\"])\n return redirect(url_for('system.index'))\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
#!/usr/bin/env python
import sys
import json
import time
import random
import pathlib
import argparse
import subprocess
proc = None
def get_wallpaper(FOLDER):
files = [path for path in pathlib.Path(FOLDER).iterdir()
if path.is_file()]
return random.choice(files)
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Set a random wallpaper per output')
parser.add_argument('--folder', metavar='D', type=str, nargs=1,
help='folder to search for images')
parser.add_argument('--delay', metavar='S', type=int,
help='How many seconds to wait before changing the wallpaper')
args = parser.parse_args()
while True:
try:
outputs = get_outputs()
cmd = 'swaybg'
for output in outputs:
image = get_wallpaper(args.folder[0])
cmd = f'{cmd} --image={image} --output={output}'
print(cmd)
proc = subprocess.Popen(cmd, shell=True)
time.sleep(args.delay)
proc.kill()
except Exception as e:
print(e, file=sys.stderr)
finally:
if proc:
proc.kill()
|
normal
|
{
"blob_id": "46b1991bba83968466390d306a4415b362b6a868",
"index": 3140,
"step-1": "<mask token>\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=\n 'folder to search for images')\n parser.add_argument('--delay', metavar='S', type=int, help=\n 'How many seconds to wait before changing the wallpaper')\n args = parser.parse_args()\n while True:\n try:\n outputs = get_outputs()\n cmd = 'swaybg'\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n finally:\n if proc:\n proc.kill()\n",
"step-4": "import sys\nimport json\nimport time\nimport random\nimport pathlib\nimport argparse\nimport subprocess\nproc = None\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=\n 'folder to search for images')\n parser.add_argument('--delay', metavar='S', type=int, help=\n 'How many seconds to wait before changing the wallpaper')\n args = parser.parse_args()\n while True:\n try:\n outputs = get_outputs()\n cmd = 'swaybg'\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n finally:\n if proc:\n proc.kill()\n",
"step-5": "#!/usr/bin/env python\n\nimport sys\nimport json\nimport time\nimport random\nimport pathlib\nimport argparse\nimport subprocess\n\nproc = None\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir()\n if path.is_file()]\n\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1,\n help='folder to search for images')\n\n parser.add_argument('--delay', metavar='S', type=int,\n help='How many seconds to wait before changing the wallpaper')\n\n args = parser.parse_args()\n\n while True:\n try:\n outputs = get_outputs()\n\n cmd = 'swaybg'\n\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n\n finally:\n if proc:\n proc.kill()\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
# Developed by Lorenzo Mambretti, Justin Wang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/jtwwang/hanabi/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
#
import rl_env
import numpy as np
import os
import sys
import random
import getopt
import pickle
from agents.neuroEvo_agent import NeuroEvoAgent
from predictors.conv_pred import conv_pred
# To find local modules
sys.path.insert(0, os.path.join(os.getcwd(), 'agents'))
def model_crossover(weights1, weights2):
    """Combine two parents' layer-weight lists into an offspring's list.

    With probability 0.7 ("crossover") the offspring takes the
    even-indexed layers from `weights1` and the odd-indexed layers from
    `weights2`; otherwise `weights1` is returned unchanged.
    """
    assert len(weights1) == len(weights2)

    if random.uniform(0, 1) > 0.3:
        print("crossover")
        # Alternate layers: even indices from parent 1, odd from parent 2.
        return [w1 if i % 2 == 0 else w2
                for i, (w1, w2) in enumerate(zip(weights1, weights2))]

    print("no crossover")
    return weights1
def mutate_weights(weights):
    """Randomly perturb the weights in place and return them.

    Each entry of each layer independently has a 10% chance of receiving
    a single uniform offset drawn from [-0.1, 0.1]. The input object is
    mutated and also returned for convenience.
    """
    for layer in weights:
        for idx in range(len(layer)):
            # Mutate this entry with probability 0.1.
            if random.uniform(0, 1) > 0.9:
                layer[idx] += random.uniform(-0.1, 0.1)
    return weights
def make_mutation(ix_to_mutate, best_ones):
    """Replace one agent's weights with a mutated crossover of two parents.

    Parents are drawn from `best_ones` with probability proportional to
    their sorted fitness in the global `scores`; the offspring overwrites
    entry `ix_to_mutate` in the global `weights` cache.
    """
    # Selection probabilities: sorted fitness of the surviving agents,
    # normalized to sum to one (aligned with the order of best_ones).
    probs = np.sort(scores)[2:]
    probs = probs / np.sum(probs)

    # Draw two distinct parents, biased towards higher-scoring agents.
    parent_a = np.random.choice(best_ones, p=probs)
    parent_b = parent_a
    while parent_b == parent_a:
        parent_b = np.random.choice(best_ones, p=probs)

    # Breed, mutate, and overwrite the target agent's cached weights.
    child = model_crossover(weights[parent_a], weights[parent_b])
    weights[ix_to_mutate] = mutate_weights(child)
def run(ix, initialize=False):
    """Evaluate agent `ix` over several Hanabi episodes and record its fitness.

    Plays flags['num_episodes'] full games of self-play (the same agent
    acts for every seat) with the NeuroEvo agent stored under model name
    str(ix), then writes its fitness into the global `scores` array as
    avg_reward * 1000 + avg_steps (reward dominates; episode length
    breaks ties).

    Args:
        ix: index/name of the agent in the population.
        initialize: if True, the agent's weights are re-initialized
            instead of loaded from disk.
    """
    # Build the environment and the agent for this population slot.
    env = rl_env.make('Hanabi-Full', num_players=flags['players'])
    agent_config = {
        'players': flags['players'],
        'num_moves': env.num_moves(),
        'observation_size': env.vectorized_observation_shape()[0],
        'model_name': str(ix),
        'initialize': initialize}
    agent = NeuroEvoAgent(agent_config)

    avg_reward = 0
    avg_steps = 0

    for _ in range(flags['num_episodes']):
        obs = env.reset()  # Observation of all players
        done = False
        agent_id = 0

        while not done:
            ob = obs['player_observations'][agent_id]

            try:
                action = agent.act(ob)
            except ValueError:
                # Fix: the original message was missing a space between
                # "agents" and "pool" (implicit string concatenation).
                print('Something went wrong. Try to reinitialize the agents '
                      'pool by using --initialize True')
                exit()

            obs, reward, done, _ = env.step(action)

            avg_reward += reward
            avg_steps += 1

            # Rotate to the next player. (The redundant `if done: break`
            # was removed: the loop condition already terminates here.)
            agent_id = (agent_id + 1) % flags['players']

    # Average over episodes.
    n_eps = float(flags['num_episodes'])
    avg_steps /= n_eps
    avg_reward /= n_eps

    agent.save(model_name=str(ix))
    # Fitness: reward dominates, average episode length breaks ties.
    scores[ix] = avg_reward * 1000 + avg_steps
if __name__ == "__main__":

    global flags, scores, weights

    # Default hyper-parameters; each may be overridden on the command line.
    flags = {'players': 2,
             'num_episodes': 100,
             'initialize': False,
             'models': 20,
             'generations': 100}

    options, arguments = getopt.getopt(sys.argv[1:], '',
                                       ['players=',
                                        'num_episodes=',
                                        'initialize=',
                                        'models=',
                                        'generations='])
    if arguments:
        # Fix: a space was missing between "weights" and "for" in the
        # implicitly concatenated usage string.
        sys.exit('usage: neuroEvo.py [options]\n'
                 '--players number of players in the game.\n'
                 '--num_episodes number of game episodes to run.\n'
                 '--initialize whether to re-initialize the weights '
                 'for all agents.\n')

    for flag, value in options:
        flag = flag[2:]  # Strip leading --.
        # Coerce the string value to the type of the default.
        flags[flag] = type(flags[flag])(value)

    # Population state: fitness per agent and a cache of each agent's weights.
    current_pool = []
    scores = np.zeros(flags['models'])
    weights = {}
    to_mutate = 0  # which of the two worst agents gets replaced this round

    # A single predictor instance, reused to load/save every model by name.
    agent = conv_pred("NeuroEvo_agent")

    # Location of the persisted fitness scores.
    filepath = os.path.join("model", "NeuroEvo_agent")
    filepath = os.path.join(filepath, "scores.pickle")
    if not flags['initialize']:
        try:
            # Fix: close the pickle file deterministically (was a bare open()).
            with open(filepath, "rb") as f:
                scores = pickle.load(f)
            loaded = True
        except IOError:
            loaded = False
    else:
        loaded = False

    print("Initialize")
    # Evaluate every model once (unless scores were loaded from disk)
    # and cache each agent's weights.
    for i in range(flags['models']):
        if flags['initialize'] or not loaded:
            run(i, flags['initialize'])
        agent.load(model_name=str(i))
        weights[i] = agent.model.get_weights()

    for gen in range(flags['generations']):

        print("Generation %i " % gen)

        # Rank agents by fitness (ascending).
        ranking = np.argsort(scores)
        print("best: %i with score %f" % (ranking[-1], scores[ranking[-1]]))
        print("worst: %i with score %f" % (ranking[0], scores[ranking[0]]))
        print("avg: %f" % (sum(scores) / flags['models']))

        # The two weakest agents: one is re-evaluated, the other replaced
        # by a mutated crossover of the survivors; roles alternate each gen.
        worst_ones = ranking[:2]
        best_ones = ranking[2:]

        ix_to_mutate = worst_ones[to_mutate]
        ix_to_simulate = worst_ones[1 - to_mutate]

        run(ix_to_simulate)
        make_mutation(ix_to_mutate, best_ones)

        # Persist the mutated agent's new weights.
        agent.model.set_weights(weights[ix_to_mutate])
        agent.save(model_name=str(ix_to_mutate))

        # Prepare for next generation.
        to_mutate = (to_mutate + 1) % 2

        # Save the rankings (fix: use a context manager to close the file).
        with open(filepath, "wb") as f:
            pickle.dump(scores, f)
        print("Saved scores.")
|
normal
|
{
"blob_id": "bbd5eb1f80843efdd2709aa19a65bf325a88f473",
"index": 8856,
"step-1": "<mask token>\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. 
Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. 
Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\nif __name__ == '__main__':\n global flags, scores, weights\n flags = {'players': 2, 'num_episodes': 100, 'initialize': False,\n 'models': 20, 'generations': 100}\n options, arguments = getopt.getopt(sys.argv[1:], '', ['players=',\n 'num_episodes=', 'initialize=', 'models=', 'generations='])\n if arguments:\n sys.exit(\n \"\"\"usage: neuroEvo.py [options]\n--players number of players in the game.\n--num_episodes number of game episodes to run.\n--initialize whether to re-initialize the weightsfor all agents.\n\"\"\"\n )\n for flag, value in options:\n flag = flag[2:]\n flags[flag] = type(flags[flag])(value)\n current_pool = []\n scores = np.zeros(flags['models'])\n weights = {}\n to_mutate = 0\n agent = conv_pred('NeuroEvo_agent')\n filepath = os.path.join('model', 'NeuroEvo_agent')\n filepath = os.path.join(filepath, 'scores.pickle')\n if not flags['initialize']:\n try:\n scores = pickle.load(open(filepath, 'rb'))\n loaded = True\n except IOError:\n loaded = False\n else:\n loaded = False\n print('Initialize')\n for i in range(flags['models']):\n if flags['initialize'] or not loaded:\n run(i, flags['initialize'])\n agent.load(model_name=str(i))\n weights[i] = agent.model.get_weights()\n for gen in range(flags['generations']):\n print('Generation %i ' % gen)\n ranking = np.argsort(scores)\n print('best: %i with score %f' % (ranking[-1], scores[ranking[-1]]))\n print('worst: %i with score %f' % (ranking[0], scores[ranking[0]]))\n print('avg: %f' % (sum(scores) / flags['models']))\n worst_ones = ranking[:2]\n best_ones = ranking[2:]\n ix_to_mutate = worst_ones[to_mutate]\n 
ix_to_simulate = worst_ones[1 - to_mutate]\n run(ix_to_simulate)\n make_mutation(ix_to_mutate, best_ones)\n agent.model.set_weights(weights[ix_to_mutate])\n agent.save(model_name=str(ix_to_mutate))\n to_mutate = (to_mutate + 1) % 2\n pickle.dump(scores, open(filepath, 'wb'))\n print('Saved scores.')\n",
"step-4": "import rl_env\nimport numpy as np\nimport os\nimport sys\nimport random\nimport getopt\nimport pickle\nfrom agents.neuroEvo_agent import NeuroEvoAgent\nfrom predictors.conv_pred import conv_pred\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. 
Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\nif __name__ == '__main__':\n global flags, scores, weights\n flags = {'players': 2, 'num_episodes': 100, 'initialize': False,\n 'models': 20, 'generations': 100}\n options, arguments = getopt.getopt(sys.argv[1:], '', ['players=',\n 'num_episodes=', 'initialize=', 'models=', 'generations='])\n if arguments:\n sys.exit(\n \"\"\"usage: neuroEvo.py [options]\n--players number of players in the game.\n--num_episodes number of game episodes to run.\n--initialize whether to re-initialize the weightsfor all agents.\n\"\"\"\n )\n for flag, value in options:\n flag = flag[2:]\n flags[flag] = type(flags[flag])(value)\n current_pool = []\n scores = np.zeros(flags['models'])\n weights = {}\n to_mutate = 0\n agent = conv_pred('NeuroEvo_agent')\n filepath = os.path.join('model', 'NeuroEvo_agent')\n filepath = os.path.join(filepath, 'scores.pickle')\n if not flags['initialize']:\n try:\n scores = pickle.load(open(filepath, 'rb'))\n loaded = True\n except IOError:\n loaded = False\n else:\n loaded = False\n print('Initialize')\n for i in range(flags['models']):\n if flags['initialize'] or not loaded:\n run(i, flags['initialize'])\n agent.load(model_name=str(i))\n weights[i] = agent.model.get_weights()\n for gen in range(flags['generations']):\n print('Generation %i ' % gen)\n ranking = np.argsort(scores)\n print('best: %i with score %f' % (ranking[-1], scores[ranking[-1]]))\n print('worst: %i with score %f' % (ranking[0], scores[ranking[0]]))\n print('avg: %f' % (sum(scores) / flags['models']))\n worst_ones = ranking[:2]\n best_ones = ranking[2:]\n ix_to_mutate = worst_ones[to_mutate]\n 
ix_to_simulate = worst_ones[1 - to_mutate]\n run(ix_to_simulate)\n make_mutation(ix_to_mutate, best_ones)\n agent.model.set_weights(weights[ix_to_mutate])\n agent.save(model_name=str(ix_to_mutate))\n to_mutate = (to_mutate + 1) % 2\n pickle.dump(scores, open(filepath, 'wb'))\n print('Saved scores.')\n",
"step-5": "# Developed by Lorenzo Mambretti, Justin Wang\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://github.com/jtwwang/hanabi/blob/master/LICENSE\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\r\n#\r\nimport rl_env\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport random\r\nimport getopt\r\nimport pickle\r\nfrom agents.neuroEvo_agent import NeuroEvoAgent\r\nfrom predictors.conv_pred import conv_pred\r\n# To find local modules\r\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\r\n\r\n\r\ndef model_crossover(weights1, weights2):\r\n\r\n new_weights = []\r\n assert len(weights1) == len(weights2)\r\n if random.uniform(0, 1) > 0.3:\r\n print(\"crossover\")\r\n for layer in range(len(weights1)):\r\n # alternate odd and even layers\r\n if layer % 2 == 0:\r\n new_weights.append(weights1[layer])\r\n else:\r\n new_weights.append(weights2[layer])\r\n else:\r\n print(\"no crossover\")\r\n new_weights = weights1\r\n\r\n return new_weights\r\n\r\n\r\ndef mutate_weights(weights):\r\n for xi in range(len(weights)):\r\n for yi in range(len(weights[xi])):\r\n if random.uniform(0, 1) > 0.9:\r\n change = random.uniform(-0.1, 0.1)\r\n weights[xi][yi] += change\r\n return weights\r\n\r\n\r\ndef make_mutation(ix_to_mutate, best_ones):\r\n\r\n p = np.sort(scores)[2:]\r\n p = p / np.sum(p)\r\n\r\n # select the weights from parents\r\n randomA = np.random.choice(best_ones, p=p)\r\n randomB = np.random.choice(best_ones, p=p)\r\n while randomB == randomA:\r\n randomB = np.random.choice(best_ones, p=p)\r\n weights1 = weights[randomA]\r\n weights2 = weights[randomB]\r\n\r\n # generate new weights\r\n new_weights = 
model_crossover(weights1, weights2)\r\n new_weights = mutate_weights(new_weights)\r\n\r\n # change the weights of the target agent\r\n weights[ix_to_mutate] = new_weights\r\n\r\n\r\ndef run(ix, initialize=False):\r\n\r\n # initialize env\r\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\r\n agent_config = {\r\n 'players': flags['players'],\r\n 'num_moves': env.num_moves(),\r\n 'observation_size': env.vectorized_observation_shape()[0],\r\n 'model_name': str(ix),\r\n 'initialize': initialize}\r\n\r\n agent = NeuroEvoAgent(agent_config)\r\n\r\n avg_reward = 0\r\n avg_steps = 0\r\n\r\n for eps in range(flags['num_episodes']):\r\n obs = env.reset() # Observation of all players\r\n done = False\r\n agent_id = 0\r\n\r\n while not done:\r\n ob = obs['player_observations'][agent_id]\r\n\r\n try:\r\n action = agent.act(ob)\r\n except ValueError:\r\n print('Something went wrong. Try to reinitialize the agents'\r\n 'pool by using --initialize True')\r\n exit()\r\n\r\n obs, reward, done, _ = env.step(action)\r\n\r\n avg_reward += reward\r\n avg_steps += 1\r\n\r\n if done:\r\n break\r\n\r\n # change player\r\n agent_id = (agent_id + 1) % flags['players']\r\n\r\n n_eps = float(flags['num_episodes'])\r\n avg_steps /= n_eps\r\n avg_reward /= n_eps\r\n\r\n agent.save(model_name=str(ix))\r\n scores[ix] = avg_reward * 1000 + avg_steps\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n global flags, scores, weights\r\n flags = {'players': 2,\r\n 'num_episodes': 100,\r\n 'initialize': False,\r\n 'models': 20,\r\n 'generations': 100}\r\n\r\n options, arguments = getopt.getopt(sys.argv[1:], '',\r\n ['players=',\r\n 'num_episodes=',\r\n 'initialize=',\r\n 'models=',\r\n 'generations='])\r\n if arguments:\r\n sys.exit('usage: neuroEvo.py [options]\\n'\r\n '--players number of players in the game.\\n'\r\n '--num_episodes number of game episodes to run.\\n'\r\n '--initialize whether to re-initialize the weights'\r\n 'for all agents.\\n')\r\n\r\n for flag, value in options:\r\n 
flag = flag[2:] # Strip leading --.\r\n flags[flag] = type(flags[flag])(value)\r\n\r\n # Initialize all models\r\n current_pool = []\r\n scores = np.zeros(flags['models'])\r\n weights = {}\r\n to_mutate = 0\r\n\r\n # create one agent\r\n agent = conv_pred(\"NeuroEvo_agent\")\r\n\r\n # load the file\r\n filepath = os.path.join(\"model\", \"NeuroEvo_agent\")\r\n filepath = os.path.join(filepath, \"scores.pickle\")\r\n if not flags['initialize']:\r\n try:\r\n scores = pickle.load(open(filepath, \"rb\"))\r\n loaded = True\r\n except IOError:\r\n loaded = False\r\n else:\r\n loaded = False\r\n\r\n print(\"Initialize\")\r\n # do an initial loop to evaluate all models\r\n for i in range(flags['models']):\r\n if flags['initialize'] or not loaded:\r\n run(i, flags['initialize'])\r\n agent.load(model_name=str(i))\r\n weights[i] = agent.model.get_weights()\r\n\r\n for gen in range(flags['generations']):\r\n\r\n print(\"Generation %i \" % gen)\r\n\r\n # sort the results\r\n ranking = np.argsort(scores)\r\n print(\"best: %i with score %f\" % (ranking[-1], scores[ranking[-1]]))\r\n print(\"worst: %i with score %f\" % (ranking[0], scores[ranking[0]]))\r\n print(\"avg: %f\" % (sum(scores)/flags['models']))\r\n\r\n # divide worst from best\r\n worst_ones = ranking[:2]\r\n best_ones = ranking[2:]\r\n\r\n # select the one to mutate and the one to use for the simulation\r\n ix_to_mutate = worst_ones[to_mutate]\r\n ix_to_simulate = worst_ones[1 - to_mutate]\r\n\r\n run(ix_to_simulate)\r\n make_mutation(ix_to_mutate, best_ones)\r\n\r\n # update weights of mutated agent\r\n agent.model.set_weights(weights[ix_to_mutate])\r\n agent.save(model_name=str(ix_to_mutate))\r\n\r\n # prepare for next generation\r\n to_mutate = (to_mutate + 1) % 2\r\n\r\n # save the rankings\r\n pickle.dump(scores, open(filepath, \"wb\"))\r\n print(\"Saved scores.\")\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
# Read in training data and labels
# Some useful parsing functions
# male/female -> 0/1
def parseSexLabel(string):
    """Map a sex label to 0 (male) or 1 (female); print an error otherwise."""
    for prefix, code in (('male', 0), ('female', 1)):
        if string.startswith(prefix):
            return code
    print("ERROR parsing sex from " + string)
# child/teen/adult/senior -> 0/1/2/3
def parseAgeLabel(string):
    """Map an age label (child/teen/adult/senior) to 0/1/2/3."""
    for prefix, code in (('child', 0), ('teen', 1),
                         ('adult', 2), ('senior', 3)):
        if string.startswith(prefix):
            return code
    print("ERROR parsing age from " + string)
# serious/smiling -> 0/1
def parseExpLabel(string):
    """Map an expression label to 0 (serious) or 1 (smiling/funny)."""
    if string.startswith('serious'):
        return 0
    if string.startswith(('smiling', 'funny')):
        return 1
    print("ERROR parsing expression from " + string)
# Count number of training instances
# ---- Load the MIT faces dataset ---------------------------------------
# Label files: faceDR (training) and faceDS (validation + testing).
# Each line names one raw 128x128 grayscale image under MITFaces/rawdata/
# plus its sex/age/expression annotations.
# Fix: every open() is now wrapped in `with` so file handles are closed
# deterministically (they were previously leaked to the GC).

# Count number of training instances (one per line of the label file).
numTraining = 0
with open("MITFaces/faceDR") as labelFile:
    for line in labelFile:
        numTraining += 1

dimensions = 128 * 128  # flattened image size
trainingFaces = np.zeros([numTraining, dimensions])
trainingSexLabels = np.zeros(numTraining)  # Sex - 0 = male; 1 = female
trainingAgeLabels = np.zeros(numTraining)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
trainingExpLabels = np.zeros(numTraining)  # Expression - 0 = serious; 1 = smiling

index = 0
with open("MITFaces/faceDR") as labelFile:
    for line in labelFile:
        # Parse the label data.
        parts = line.split()
        trainingSexLabels[index] = parseSexLabel(parts[2])
        trainingAgeLabels[index] = parseAgeLabel(parts[4])
        trainingExpLabels[index] = parseExpLabel(parts[8])
        # Read the raw face image, normalized to [0, 1].
        fileName = "MITFaces/rawdata/" + parts[0]
        with open(fileName, 'rb') as fileIn:
            trainingFaces[index, :] = np.fromfile(
                fileIn, dtype=np.uint8, count=dimensions) / 255.0
        # And move along.
        index += 1

# Count validation/testing instances: the first half of faceDS becomes
# the testing set, the remainder the validation set.
numValidation = 0
numTesting = 0
with open("MITFaces/faceDS") as labelFile:
    for line in labelFile:
        numValidation += 1
numTesting = int(numValidation / 2)
numValidation -= numTesting

validationFaces = np.zeros([numValidation, dimensions])
validationSexLabels = np.zeros(numValidation)  # Sex - 0 = male; 1 = female
validationAgeLabels = np.zeros(numValidation)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
validationExpLabels = np.zeros(numValidation)  # Expression - 0 = serious; 1 = smiling

testingFaces = np.zeros([numTesting, dimensions])
testingSexLabels = np.zeros(numTesting)  # Sex - 0 = male; 1 = female
testingAgeLabels = np.zeros(numTesting)  # Age - 0 = child; 1 = teen; 2 = adult; 3 = senior
testingExpLabels = np.zeros(numTesting)  # Expression - 0 = serious; 1 = smiling

index = 0
with open("MITFaces/faceDS") as labelFile:
    for line in labelFile:
        # Parse the label data.
        parts = line.split()
        if index < numTesting:
            testingSexLabels[index] = parseSexLabel(parts[2])
            testingAgeLabels[index] = parseAgeLabel(parts[4])
            testingExpLabels[index] = parseExpLabel(parts[8])
            fileName = "MITFaces/rawdata/" + parts[0]
            with open(fileName, 'rb') as fileIn:
                testingFaces[index, :] = np.fromfile(
                    fileIn, dtype=np.uint8, count=dimensions) / 255.0
        else:
            vIndex = index - numTesting
            validationSexLabels[vIndex] = parseSexLabel(parts[2])
            validationAgeLabels[vIndex] = parseAgeLabel(parts[4])
            validationExpLabels[vIndex] = parseExpLabel(parts[8])
            fileName = "MITFaces/rawdata/" + parts[0]
            with open(fileName, 'rb') as fileIn:
                validationFaces[vIndex, :] = np.fromfile(
                    fileIn, dtype=np.uint8, count=dimensions) / 255.0
        # And move along.
        index += 1
# Train a small fully-connected network to classify sex (male/female)
# from the flattened 128x128 MIT face images.
# (The previous docstring described an unrelated MNIST convnet example.)
import tensorflow as tf
from tensorflow import keras

batch_size = 128
epochs = 12

x_train = trainingFaces
y_train = trainingSexLabels
x_test = testingFaces
y_test = testingSexLabels

# One-hot encode the binary labels for categorical crossentropy.
y_train = keras.utils.to_categorical(y_train, num_classes=2)
y_test = keras.utils.to_categorical(y_test, num_classes=2)

model = keras.models.Sequential()
# Inputs are flat vectors of length 128*128, so only Dense layers apply.
# Fix: the original Conv2D layer would raise at build time — Conv2D
# requires 4-D input (batch, height, width, channels) but receives the
# 2-D output of a Dense layer. A Dense(16) layer replaces it (a 1x1
# convolution over flat features is an equivalent transform anyway).
model.add(keras.layers.Dense(32, activation='relu',
                             input_shape=(dimensions,)))
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(2, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# Fix: batch_size was defined but never passed to fit().
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
normal
|
{
"blob_id": "6822a0a194e8b401fecfed2b617ddd5489302389",
"index": 4718,
"step-1": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\n<mask token>\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\n<mask token>\nfor line in open('MITFaces/faceDR'):\n numTraining += 1\n<mask token>\nfor line in open('MITFaces/faceDR'):\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nfor line in open('MITFaces/faceDS'):\n numValidation += 1\n<mask token>\nnumValidation -= numTesting\n<mask token>\nfor line in open('MITFaces/faceDS'):\n parts = line.split()\n if index < numTesting:\n testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n 
validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,\n count=dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.\n optimizers.Adadelta(), metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=epochs, verbose=1)\n<mask token>\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n",
"step-4": "<mask token>\n\n\ndef parseSexLabel(string):\n if string.startswith('male'):\n return 0\n if string.startswith('female'):\n return 1\n print('ERROR parsing sex from ' + string)\n\n\ndef parseAgeLabel(string):\n if string.startswith('child'):\n return 0\n if string.startswith('teen'):\n return 1\n if string.startswith('adult'):\n return 2\n if string.startswith('senior'):\n return 3\n print('ERROR parsing age from ' + string)\n\n\ndef parseExpLabel(string):\n if string.startswith('serious'):\n return 0\n if string.startswith('smiling') or string.startswith('funny'):\n return 1\n print('ERROR parsing expression from ' + string)\n\n\nnumTraining = 0\nfor line in open('MITFaces/faceDR'):\n numTraining += 1\ndimensions = 128 * 128\ntrainingFaces = np.zeros([numTraining, dimensions])\ntrainingSexLabels = np.zeros(numTraining)\ntrainingAgeLabels = np.zeros(numTraining)\ntrainingExpLabels = np.zeros(numTraining)\nindex = 0\nfor line in open('MITFaces/faceDR'):\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n index += 1\nnumValidation = 0\nnumTesting = 0\nfor line in open('MITFaces/faceDS'):\n numValidation += 1\nnumTesting = int(numValidation / 2)\nnumValidation -= numTesting\nvalidationFaces = np.zeros([numValidation, dimensions])\nvalidationSexLabels = np.zeros(numValidation)\nvalidationAgeLabels = np.zeros(numValidation)\nvalidationExpLabels = np.zeros(numValidation)\ntestingFaces = np.zeros([numTesting, dimensions])\ntestingSexLabels = np.zeros(numTesting)\ntestingAgeLabels = np.zeros(numTesting)\ntestingExpLabels = np.zeros(numTesting)\nindex = 0\nfor line in open('MITFaces/faceDS'):\n parts = line.split()\n if index < numTesting:\n 
testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index, :] = np.fromfile(fileIn, dtype=np.uint8, count=\n dimensions) / 255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n fileName = 'MITFaces/rawdata/' + parts[0]\n fileIn = open(fileName, 'rb')\n validationFaces[vIndex, :] = np.fromfile(fileIn, dtype=np.uint8,\n count=dimensions) / 255.0\n fileIn.close()\n index += 1\n<mask token>\nbatch_size = 128\nepochs = 12\nx_train = trainingFaces\ny_train = trainingSexLabels\nx_test = testingFaces\ny_test = testingSexLabels\ny_train = keras.utils.to_categorical(y_train, num_classes=2)\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1, 1), activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.\n optimizers.Adadelta(), metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=epochs, verbose=1)\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n",
"step-5": "import numpy as np\n\n# Read in training data and labels\n\n# Some useful parsing functions\n\n# male/female -> 0/1\ndef parseSexLabel(string):\n if (string.startswith('male')):\n return 0\n if (string.startswith('female')):\n return 1\n print(\"ERROR parsing sex from \" + string)\n\n# child/teen/adult/senior -> 0/1/2/3\ndef parseAgeLabel(string):\n if (string.startswith('child')):\n return 0\n if (string.startswith('teen')):\n return 1\n if (string.startswith('adult')):\n return 2\n if (string.startswith('senior')):\n return 3\n print(\"ERROR parsing age from \" + string)\n\n# serious/smiling -> 0/1\ndef parseExpLabel(string):\n if (string.startswith('serious')):\n return 0\n if (string.startswith('smiling') or string.startswith('funny')):\n return 1\n print(\"ERROR parsing expression from \" + string)\n\n# Count number of training instances\n\nnumTraining = 0\n\nfor line in open (\"MITFaces/faceDR\"):\n numTraining += 1\n\ndimensions = 128*128\n\ntrainingFaces = np.zeros([numTraining,dimensions])\ntrainingSexLabels = np.zeros(numTraining) # Sex - 0 = male; 1 = female\ntrainingAgeLabels = np.zeros(numTraining) # Age - 0 = child; 1 = teen; 2 = male \ntrainingExpLabels = np.zeros(numTraining) # Expression - 0 = serious; 1 = smiling\n\nindex = 0\nfor line in open (\"MITFaces/faceDR\"):\n # Parse the label data\n parts = line.split()\n trainingSexLabels[index] = parseSexLabel(parts[2])\n trainingAgeLabels[index] = parseAgeLabel(parts[4])\n trainingExpLabels[index] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n trainingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n # And move along\n index += 1\n\n# Count number of validation/testing instances\n\nnumValidation = 0\nnumTesting = 0\n\n# Assume they're all Validation\nfor line in open (\"MITFaces/faceDS\"):\n numValidation += 1\n\n# And make half of them testing\nnumTesting = 
int(numValidation/2)\nnumValidation -= numTesting\n\nvalidationFaces = np.zeros([numValidation,dimensions])\nvalidationSexLabels = np.zeros(numValidation) # Sex - 0 = male; 1 = female\nvalidationAgeLabels = np.zeros(numValidation) # Age - 0 = child; 1 = teen; 2 = male \nvalidationExpLabels = np.zeros(numValidation) # Expression - 0 = serious; 1 = smiling\n\ntestingFaces = np.zeros([numTesting,dimensions])\ntestingSexLabels = np.zeros(numTesting) # Sex - 0 = male; 1 = female\ntestingAgeLabels = np.zeros(numTesting) # Age - 0 = child; 1 = teen; 2 = male \ntestingExpLabels = np.zeros(numTesting) # Expression - 0 = serious; 1 = smiling\n\nindex = 0\nfor line in open (\"MITFaces/faceDS\"):\n # Parse the label data\n parts = line.split()\n\n if (index < numTesting):\n testingSexLabels[index] = parseSexLabel(parts[2])\n testingAgeLabels[index] = parseAgeLabel(parts[4])\n testingExpLabels[index] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n testingFaces[index,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n else:\n vIndex = index - numTesting\n validationSexLabels[vIndex] = parseSexLabel(parts[2])\n validationAgeLabels[vIndex] = parseAgeLabel(parts[4])\n validationExpLabels[vIndex] = parseExpLabel(parts[8])\n # Read in the face\n fileName = \"MITFaces/rawdata/\" + parts[0]\n fileIn = open(fileName, 'rb')\n validationFaces[vIndex,:] = np.fromfile(fileIn, dtype=np.uint8,count=dimensions)/255.0\n fileIn.close()\n \n # And move along\n index += 1\n\n\n\n\n\n\n\n\n\n\n'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nbatch_size = 128\nepochs = 12\n\nx_train = trainingFaces\ny_train = trainingSexLabels\nx_test = testingFaces\ny_test = testingSexLabels\n\ny_train 
= keras.utils.to_categorical(y_train, num_classes=2)\ny_test = keras.utils.to_categorical(y_test, num_classes=2)\n\nmodel = keras.models.Sequential()\nmodel.add(keras.layers.Dense(32, activation='relu'))\nmodel.add(keras.layers.Conv2D(16, kernel_size=(1,1),activation='relu'))\nmodel.add(keras.layers.Dropout(0.5))\nmodel.add(keras.layers.Dense(2, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n epochs=epochs,\n verbose=1)\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
#coding: utf-8
"""
1) Encontre em um texto os nomes próprios e os retorne em uma lista. Utilize o Regex (‘import re’) e a função findall(). Na versão básica, retorne todas as palavras que iniciam com maiúscula.
2) Apresente um plot de alguns segundos dos dados de acelerômetro do dataset:
https://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#
Use a função read_csv() para abrir os arquivos
"""
if __name__ == "__main__":
pass
|
normal
|
{
"blob_id": "d95d899c6eae5a90c90d3d920ee40b38bf304805",
"index": 532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pass\n",
"step-3": "#coding: utf-8\n\"\"\" \n1) Encontre em um texto os nomes próprios e os retorne em uma lista. Utilize o Regex (‘import re’) e a função findall(). Na versão básica, retorne todas as palavras que iniciam com maiúscula.\n\n\n2) Apresente um plot de alguns segundos dos dados de acelerômetro do dataset:\nhttps://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#\nUse a função read_csv() para abrir os arquivos\n\n\"\"\"\n\nif __name__ == \"__main__\":\n\tpass",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# 导包
from time import sleep
from selenium import webdriver
# 实例化浏览器
driver = webdriver.Firefox()
# 打开页面
driver.get(r"F:\BaiduYunDownload\webdriverspace\sources\注册实例.html")
driver.maximize_window()
sleep(2)
# 定位注册A按钮并点击
driver.find_element_by_link_text("注册A网页").click()
# 获取当前敞口句柄
current_handle = driver.current_window_handle
print("当前敞口句柄:", current_handle)
# 获取所有窗口句柄
handles = driver.window_handles
print("所有敞口句柄:", handles)
# 遍历及切换
for handle in handles:
if current_handle != handle:
# 执行切换窗口方法
driver.switch_to.window(handle)
# 填写注册A信息
# 输入注册A信息
driver.find_element_by_css_selector("#userA").send_keys("admin")
sleep(1)
driver.find_element_by_css_selector("#passwordA").send_keys("123456")
sleep(1)
driver.find_element_by_css_selector("#telA").send_keys("18111265465")
sleep(1)
driver.find_element_by_css_selector("#emailA").send_keys("1188@qq.com")
# 截图并保存
driver.get_screenshot_as_file("../image/imag01.jpg")
sleep(2)
driver.quit()
|
normal
|
{
"blob_id": "f73a316b6020908472e35a7b78959a9bda6e8e56",
"index": 7810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('F:\\\\BaiduYunDownload\\\\webdriverspace\\\\sources\\\\注册实例.html')\ndriver.maximize_window()\nsleep(2)\ndriver.find_element_by_link_text('注册A网页').click()\n<mask token>\nprint('当前敞口句柄:', current_handle)\n<mask token>\nprint('所有敞口句柄:', handles)\nfor handle in handles:\n if current_handle != handle:\n driver.switch_to.window(handle)\n driver.find_element_by_css_selector('#userA').send_keys('admin')\n sleep(1)\n driver.find_element_by_css_selector('#passwordA').send_keys('123456')\n sleep(1)\n driver.find_element_by_css_selector('#telA').send_keys('18111265465')\n sleep(1)\n driver.find_element_by_css_selector('#emailA').send_keys('1188@qq.com')\n driver.get_screenshot_as_file('../image/imag01.jpg')\nsleep(2)\ndriver.quit()\n",
"step-3": "<mask token>\ndriver = webdriver.Firefox()\ndriver.get('F:\\\\BaiduYunDownload\\\\webdriverspace\\\\sources\\\\注册实例.html')\ndriver.maximize_window()\nsleep(2)\ndriver.find_element_by_link_text('注册A网页').click()\ncurrent_handle = driver.current_window_handle\nprint('当前敞口句柄:', current_handle)\nhandles = driver.window_handles\nprint('所有敞口句柄:', handles)\nfor handle in handles:\n if current_handle != handle:\n driver.switch_to.window(handle)\n driver.find_element_by_css_selector('#userA').send_keys('admin')\n sleep(1)\n driver.find_element_by_css_selector('#passwordA').send_keys('123456')\n sleep(1)\n driver.find_element_by_css_selector('#telA').send_keys('18111265465')\n sleep(1)\n driver.find_element_by_css_selector('#emailA').send_keys('1188@qq.com')\n driver.get_screenshot_as_file('../image/imag01.jpg')\nsleep(2)\ndriver.quit()\n",
"step-4": "from time import sleep\nfrom selenium import webdriver\ndriver = webdriver.Firefox()\ndriver.get('F:\\\\BaiduYunDownload\\\\webdriverspace\\\\sources\\\\注册实例.html')\ndriver.maximize_window()\nsleep(2)\ndriver.find_element_by_link_text('注册A网页').click()\ncurrent_handle = driver.current_window_handle\nprint('当前敞口句柄:', current_handle)\nhandles = driver.window_handles\nprint('所有敞口句柄:', handles)\nfor handle in handles:\n if current_handle != handle:\n driver.switch_to.window(handle)\n driver.find_element_by_css_selector('#userA').send_keys('admin')\n sleep(1)\n driver.find_element_by_css_selector('#passwordA').send_keys('123456')\n sleep(1)\n driver.find_element_by_css_selector('#telA').send_keys('18111265465')\n sleep(1)\n driver.find_element_by_css_selector('#emailA').send_keys('1188@qq.com')\n driver.get_screenshot_as_file('../image/imag01.jpg')\nsleep(2)\ndriver.quit()\n",
"step-5": "# 导包\nfrom time import sleep\nfrom selenium import webdriver\n\n# 实例化浏览器\ndriver = webdriver.Firefox()\n# 打开页面\ndriver.get(r\"F:\\BaiduYunDownload\\webdriverspace\\sources\\注册实例.html\")\ndriver.maximize_window()\nsleep(2)\n\n# 定位注册A按钮并点击\ndriver.find_element_by_link_text(\"注册A网页\").click()\n\n# 获取当前敞口句柄\ncurrent_handle = driver.current_window_handle\nprint(\"当前敞口句柄:\", current_handle)\n\n# 获取所有窗口句柄\nhandles = driver.window_handles\nprint(\"所有敞口句柄:\", handles)\n\n# 遍历及切换\n\nfor handle in handles:\n if current_handle != handle:\n # 执行切换窗口方法\n driver.switch_to.window(handle)\n # 填写注册A信息\n # 输入注册A信息\n driver.find_element_by_css_selector(\"#userA\").send_keys(\"admin\")\n sleep(1)\n driver.find_element_by_css_selector(\"#passwordA\").send_keys(\"123456\")\n sleep(1)\n driver.find_element_by_css_selector(\"#telA\").send_keys(\"18111265465\")\n sleep(1)\n driver.find_element_by_css_selector(\"#emailA\").send_keys(\"1188@qq.com\")\n # 截图并保存\n driver.get_screenshot_as_file(\"../image/imag01.jpg\")\nsleep(2)\ndriver.quit()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(mon: int, day: int) ->str:
return date(2016, mon, day).strftime('%a').upper()
<|reserved_special_token_1|>
from datetime import date
def solution(mon: int, day: int) ->str:
return date(2016, mon, day).strftime('%a').upper()
<|reserved_special_token_1|>
from datetime import date
def solution(mon: int, day: int) -> str:
return date(2016, mon, day).strftime("%a").upper()
|
flexible
|
{
"blob_id": "67385d6d58cc79037660be546d41ea9ba1f790fa",
"index": 5043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(mon: int, day: int) ->str:\n return date(2016, mon, day).strftime('%a').upper()\n",
"step-3": "from datetime import date\n\n\ndef solution(mon: int, day: int) ->str:\n return date(2016, mon, day).strftime('%a').upper()\n",
"step-4": "from datetime import date\n\ndef solution(mon: int, day: int) -> str:\n return date(2016, mon, day).strftime(\"%a\").upper()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.